[COMMON] ion: add ion_exynos_dma_buf_start[end]_cpu_access
author: hyesoo.yu <hyesoo.yu@samsung.com>
Thu, 31 May 2018 09:30:06 +0000 (18:30 +0900)
committer: Janghyuck Kim <janghyuck.kim@samsung.com>
Mon, 23 Jul 2018 05:39:31 +0000 (14:39 +0900)
These functions flush the CPU caches for the buffer when the dma-buf
framework begins or ends CPU access with direction DMA_BIDIRECTIONAL,
even if there is no attached device.

Change-Id: I22800e0abcc51b7f4e93d1d58dc55c0fd84901bb
Signed-off-by: hyesoo.yu <hyesoo.yu@samsung.com>
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion.h
drivers/staging/android/ion/ion_exynos.c
drivers/staging/android/ion/ion_exynos.h

index 42b0fa3c275e2a748de87b83761d90b8b8e7c6f2..5902601c43aeaf30b64777a8200484b0cf6dac80 100644 (file)
@@ -156,7 +156,7 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
                ion_buffer_destroy(buffer);
 }
 
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
        void *vaddr;
 
@@ -178,7 +178,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
        return vaddr;
 }
 
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
@@ -352,6 +352,7 @@ static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *ptr)
        }
 }
 
+#ifndef CONFIG_ION_EXYNOS
 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
 {
@@ -403,21 +404,24 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 
        return 0;
 }
+#endif
 
 const struct dma_buf_ops ion_dma_buf_ops = {
 #ifdef CONFIG_ION_EXYNOS
        .map_dma_buf = ion_exynos_map_dma_buf,
        .unmap_dma_buf = ion_exynos_unmap_dma_buf,
+       .begin_cpu_access = ion_exynos_dma_buf_begin_cpu_access,
+       .end_cpu_access = ion_exynos_dma_buf_end_cpu_access,
 #else
        .attach = ion_dma_buf_attach,
        .detach = ion_dma_buf_detatch,
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
+       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+       .end_cpu_access = ion_dma_buf_end_cpu_access,
 #endif
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
-       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
-       .end_cpu_access = ion_dma_buf_end_cpu_access,
        .map_atomic = ion_dma_buf_kmap,
        .unmap_atomic = ion_dma_buf_kunmap,
        .map = ion_dma_buf_kmap,
index 114a6efccb56a2bd79326e5bf57727c3ef282f07..00f8902a0092e145f014122783cc2e9e3d263e94 100644 (file)
@@ -365,4 +365,7 @@ int ion_query_heaps(struct ion_heap_query *query);
 void ion_contig_heap_show_buffers(struct ion_heap *heap,
                                  phys_addr_t base, size_t pool_size);
 
+void *ion_buffer_kmap_get(struct ion_buffer *buffer);
+void ion_buffer_kmap_put(struct ion_buffer *buffer);
+
 #endif /* _ION_H */
index e41f51a00771d4f2dc7800ca6b91c61fa7c38197..fa760e52411ae76d3131ceb8ea84ca1102cf41ee 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/slab.h>
 #include <linux/exynos_iovmm.h>
+#include <asm/cacheflush.h>
 
 #include "ion.h"
 #include "ion_exynos.h"
@@ -242,3 +243,78 @@ void ion_exynos_unmap_dma_buf(struct dma_buf_attachment *attachment,
                dma_sync_sg_for_cpu(attachment->dev, table->sgl,
                                    table->nents, direction);
 }
+
+/*
+ * exynos_flush_sg - clean+invalidate CPU caches for every segment of @sgl
+ * @dev:    device used to translate DMA addresses back to physical
+ * @sgl:    scatterlist describing the buffer
+ * @nelems: number of scatterlist entries to walk
+ *
+ * Used for DMA_BIDIRECTIONAL CPU access where a full flush is required
+ * even when no device is attached.  Assumes every segment lies in the
+ * kernel linear mapping (phys_to_virt) -- TODO confirm for all heaps.
+ */
+static void exynos_flush_sg(struct device *dev,
+                           struct scatterlist *sgl, int nelems)
+{
+       struct scatterlist *sg;
+       int i;
+       void *virt;
+
+       for_each_sg(sgl, sg, nelems, i) {
+               virt = phys_to_virt(dma_to_phys(dev, sg->dma_address));
+
+               /*
+                * Flush this segment's byte length.  Passing nelems (the
+                * segment count) here would flush only a handful of bytes
+                * per segment and leave stale cache lines behind.
+                */
+               __dma_flush_area(virt, sg->length);
+       }
+}
+
+/*
+ * ion_exynos_dma_buf_begin_cpu_access - dma_buf begin_cpu_access callback
+ * @dmabuf:    dma-buf whose priv is the ion_buffer
+ * @direction: intended CPU access direction
+ *
+ * Takes a kernel-mapping reference when the heap supports map_kernel,
+ * then makes the buffer contents visible to the CPU: DMA_BIDIRECTIONAL
+ * forces a full cache flush even when no device is attached; any other
+ * direction gets a regular dma_sync_sg_for_cpu().  Uncached buffers need
+ * no maintenance.  Always returns 0.
+ *
+ * NOTE(review): the vaddr from ion_buffer_kmap_get() is not checked for
+ * ERR_PTR (ion_dma_buf_begin_cpu_access() does check) -- confirm that a
+ * kmap failure here is acceptable to ignore.
+ */
+int ion_exynos_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                       enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       void *vaddr;
+
+       /* Pin a kernel mapping for the duration of the CPU access. */
+       if (buffer->heap->ops->map_kernel) {
+               mutex_lock(&buffer->lock);
+               vaddr = ion_buffer_kmap_get(buffer);
+               mutex_unlock(&buffer->lock);
+       }
+
+       /* Uncached buffers are always CPU-coherent; nothing to sync. */
+       if (!ion_buffer_cached(buffer))
+               return 0;
+
+       mutex_lock(&buffer->lock);
+       if (direction == DMA_BIDIRECTIONAL) {
+               /* Full flush regardless of attached devices. */
+               exynos_flush_sg(buffer->dev->dev.this_device,
+                               buffer->sg_table->sgl,
+                               buffer->sg_table->orig_nents);
+       } else {
+               dma_sync_sg_for_cpu(buffer->dev->dev.this_device,
+                                   buffer->sg_table->sgl,
+                                   buffer->sg_table->orig_nents,
+                                   direction);
+       }
+       mutex_unlock(&buffer->lock);
+
+       return 0;
+}
+
+/*
+ * ion_exynos_dma_buf_end_cpu_access - dma_buf end_cpu_access callback
+ * @dmabuf:    dma-buf whose priv is the ion_buffer
+ * @direction: direction the CPU access was performed with
+ *
+ * Drops the kernel-mapping reference taken by begin_cpu_access (when the
+ * heap supports map_kernel), then hands the buffer back to devices:
+ * DMA_BIDIRECTIONAL forces a full cache flush even with no device
+ * attached; any other direction gets a regular dma_sync_sg_for_device().
+ * Uncached buffers need no maintenance.  Always returns 0.
+ */
+int ion_exynos_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+                                     enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+
+       /* Release the mapping reference paired with begin_cpu_access. */
+       if (buffer->heap->ops->map_kernel) {
+               mutex_lock(&buffer->lock);
+               ion_buffer_kmap_put(buffer);
+               mutex_unlock(&buffer->lock);
+       }
+
+       /* Uncached buffers are always CPU-coherent; nothing to sync. */
+       if (!ion_buffer_cached(buffer))
+               return 0;
+
+       mutex_lock(&buffer->lock);
+       if (direction == DMA_BIDIRECTIONAL) {
+               /* Full flush regardless of attached devices. */
+               exynos_flush_sg(buffer->dev->dev.this_device,
+                               buffer->sg_table->sgl,
+                               buffer->sg_table->orig_nents);
+       } else {
+               dma_sync_sg_for_device(buffer->dev->dev.this_device,
+                                      buffer->sg_table->sgl,
+                                      buffer->sg_table->orig_nents,
+                                      direction);
+       }
+       mutex_unlock(&buffer->lock);
+
+       return 0;
+}
index c22c5770dabf604a95fad7e9a36e8d07779693a8..05872136fa00929422c2b0b72021cae7b2bab40f 100644 (file)
@@ -102,7 +102,10 @@ struct sg_table *ion_exynos_map_dma_buf(struct dma_buf_attachment *attachment,
 void ion_exynos_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction);
-
+int ion_exynos_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                       enum dma_data_direction direction);
+int ion_exynos_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+                                     enum dma_data_direction direction);
 void ion_debug_initialize(struct ion_device *idev);
 
 #else
@@ -140,6 +143,21 @@ static inline struct sg_table *ion_exynos_map_dma_buf(
 }
 
 #define ion_exynos_unmap_dma_buf(attachment, table, direction) do { } while (0)
+
+/* Stub for !CONFIG_ION_EXYNOS builds: no cache maintenance needed. */
+static inline int ion_exynos_dma_buf_begin_cpu_access(
+                                       struct dma_buf *dmabuf,
+                                       enum dma_data_direction direction)
+{
+       return 0;
+}
+
+/* Stub for !CONFIG_ION_EXYNOS builds: no cache maintenance needed. */
+static inline int ion_exynos_dma_buf_end_cpu_access(
+                                       struct dma_buf *dmabuf,
+                                       enum dma_data_direction direction)
+{
+       return 0;
+}
+
 #define ion_debug_initialize(idev) do { } while (0)
 #endif