ion_buffer_destroy(buffer);
}
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+/*
+ * ion_buffer_kmap_get - take a reference on the buffer's kernel mapping.
+ *
+ * Made non-static so the CONFIG_ION_EXYNOS begin/end_cpu_access paths can
+ * pin a kernel mapping for the duration of a CPU access window (see the
+ * matching prototype added to ion.h).
+ *
+ * NOTE(review): the lines that assign 'vaddr' (kmap_cnt bookkeeping and the
+ * heap map_kernel call) are elided from this diff hunk; as displayed,
+ * 'vaddr' would be returned uninitialized — confirm against the full file.
+ */
+void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
return vaddr;
}
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+/*
+ * ion_buffer_kmap_put - drop a reference on the buffer's kernel mapping.
+ *
+ * Counterpart of ion_buffer_kmap_get(); made non-static for the same
+ * reason.  When kmap_cnt reaches zero the mapping is torn down — the
+ * unmap call inside the if-block is elided from this diff hunk.
+ */
+void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
buffer->kmap_cnt--;
if (!buffer->kmap_cnt) {
}
}
+#ifndef CONFIG_ION_EXYNOS
+/*
+ * Generic begin_cpu_access, now compiled out when CONFIG_ION_EXYNOS is set
+ * because ion_dma_buf_ops installs the exynos-specific replacement instead.
+ * (The cache-sync body of this function is elided from this diff hunk.)
+ */
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
return 0;
}
+#endif
const struct dma_buf_ops ion_dma_buf_ops = {
#ifdef CONFIG_ION_EXYNOS
.map_dma_buf = ion_exynos_map_dma_buf,
.unmap_dma_buf = ion_exynos_unmap_dma_buf,
+ .begin_cpu_access = ion_exynos_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_exynos_dma_buf_end_cpu_access,
#else
.attach = ion_dma_buf_attach,
.detach = ion_dma_buf_detatch,
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
#endif
.mmap = ion_mmap,
.release = ion_dma_buf_release,
- .begin_cpu_access = ion_dma_buf_begin_cpu_access,
- .end_cpu_access = ion_dma_buf_end_cpu_access,
.map_atomic = ion_dma_buf_kmap,
.unmap_atomic = ion_dma_buf_kunmap,
.map = ion_dma_buf_kmap,
void ion_contig_heap_show_buffers(struct ion_heap *heap,
phys_addr_t base, size_t pool_size);
+void *ion_buffer_kmap_get(struct ion_buffer *buffer);
+void ion_buffer_kmap_put(struct ion_buffer *buffer);
+
#endif /* _ION_H */
#include <linux/slab.h>
#include <linux/exynos_iovmm.h>
+#include <asm/cacheflush.h>
#include "ion.h"
#include "ion_exynos.h"
dma_sync_sg_for_cpu(attachment->dev, table->sgl,
table->nents, direction);
}
+
+/*
+ * exynos_flush_sg - clean+invalidate the CPU caches covering a scatterlist.
+ * @dev:    device used to translate sg DMA addresses back to physical.
+ * @sgl:    first scatterlist entry.
+ * @nelems: number of scatterlist entries to walk.
+ *
+ * Used for DMA_BIDIRECTIONAL CPU access, where the one-way
+ * dma_sync_sg_for_{cpu,device} helpers are not sufficient.
+ */
+static void exynos_flush_sg(struct device *dev,
+			    struct scatterlist *sgl, int nelems)
+{
+	struct scatterlist *sg;
+	int i;
+	void *virt;
+
+	for_each_sg(sgl, sg, nelems, i) {
+		virt = phys_to_virt(dma_to_phys(dev, sg->dma_address));
+
+		/*
+		 * Flush the byte length of this entry.  Previously 'nelems'
+		 * (the entry COUNT) was passed as the size, which flushed
+		 * only a handful of bytes per entry and left stale cache
+		 * lines over the rest of the buffer.
+		 */
+		__dma_flush_area(virt, sg->length);
+	}
+}
+
+/*
+ * ion_exynos_dma_buf_begin_cpu_access - prepare an ION buffer for CPU access.
+ * @dmabuf:    dma-buf whose ->priv is the ion_buffer.
+ * @direction: direction of the CPU access about to happen.
+ *
+ * If the heap supports map_kernel, pins a reference-counted kernel mapping
+ * (paired with the kmap_put in end_cpu_access).  For cached buffers it then
+ * performs the cache maintenance required before the CPU reads the buffer.
+ * Always returns 0.
+ */
+int ion_exynos_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		/* Result intentionally unused here; only the refcount matters. */
+		vaddr = ion_buffer_kmap_get(buffer);
+		mutex_unlock(&buffer->lock);
+	}
+
+	/* Uncached buffers need no cache maintenance. */
+	if (!ion_buffer_cached(buffer))
+		return 0;
+
+	mutex_lock(&buffer->lock);
+	if (direction == DMA_BIDIRECTIONAL) {
+		/*
+		 * Bidirectional access needs a full clean+invalidate, which
+		 * the single-direction dma_sync helpers do not provide.
+		 */
+		exynos_flush_sg(buffer->dev->dev.this_device,
+				buffer->sg_table->sgl,
+				buffer->sg_table->orig_nents);
+	} else {
+		dma_sync_sg_for_cpu(buffer->dev->dev.this_device,
+				    buffer->sg_table->sgl,
+				    buffer->sg_table->orig_nents,
+				    direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+/*
+ * ion_exynos_dma_buf_end_cpu_access - finish a CPU access window on a buffer.
+ * @dmabuf:    dma-buf whose ->priv is the ion_buffer.
+ * @direction: direction of the CPU access that just completed.
+ *
+ * Drops the kernel-mapping reference taken in begin_cpu_access (when the
+ * heap supports map_kernel), then, for cached buffers, writes dirty cache
+ * lines back so a subsequent device access sees the CPU's updates.
+ * Always returns 0.
+ */
+int ion_exynos_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		/* Pairs with the kmap_get in begin_cpu_access. */
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
+
+	/* Uncached buffers need no cache maintenance. */
+	if (!ion_buffer_cached(buffer))
+		return 0;
+
+	mutex_lock(&buffer->lock);
+	if (direction == DMA_BIDIRECTIONAL) {
+		/* Full clean+invalidate; see begin_cpu_access. */
+		exynos_flush_sg(buffer->dev->dev.this_device,
+				buffer->sg_table->sgl,
+				buffer->sg_table->orig_nents);
+	} else {
+		dma_sync_sg_for_device(buffer->dev->dev.this_device,
+				       buffer->sg_table->sgl,
+				       buffer->sg_table->orig_nents,
+				       direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
void ion_exynos_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction);
-
+int ion_exynos_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction);
+int ion_exynos_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction);
void ion_debug_initialize(struct ion_device *idev);
#else
}
#define ion_exynos_unmap_dma_buf(attachment, table, direction) do { } while (0)
+
+/*
+ * No-op stub used when CONFIG_ION_EXYNOS is disabled; keeps callers
+ * compiling with the same signature and reports success.
+ */
+static inline int ion_exynos_dma_buf_begin_cpu_access(
+					struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+{
+	return 0;
+}
+
+/*
+ * No-op stub used when CONFIG_ION_EXYNOS is disabled; counterpart of the
+ * begin_cpu_access stub above, always succeeds.
+ */
+static inline int ion_exynos_dma_buf_end_cpu_access(
+					struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+{
+	return 0;
+}
+
#define ion_debug_initialize(idev) do { } while (0)
#endif