android: ion: add dmabuf cpu access ops for exynos
authorCho KyongHo <pullip.cho@samsung.com>
Fri, 2 Mar 2018 14:03:48 +0000 (23:03 +0900)
committerSangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:24 +0000 (19:45 +0900)
The .begin_cpu_access and .end_cpu_access dma-buf ops provided by ION
need a scatter-gather list because they rely on the dma-mapping API.
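For reference, these are the dma-mapping calls the ops depend on; both
take a device and a scatter-gather list (signatures as in the mainline
kernel of this era):

    #include <linux/dma-mapping.h>

    /* make a buffer described by an sg list coherent for CPU access */
    void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                             int nelems, enum dma_data_direction dir);
    /* hand ownership back to the device after CPU access */
    void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                int nelems, enum dma_data_direction dir);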

Since commit 8dbbf7b ("android: ion: do not map in dma_map_attach()
for Exynos"), ion_dma_buf_begin_cpu_access() and
ion_dma_buf_end_cpu_access() lose the scatter-gather list that used to
be stored in the dma-buf attachment, because that commit removed it.

Instead, the dma-mapping API can be used with ion_buffer.sg_table and
the miscdevice of ion_device, which have been initialized at buffer
allocation and at ION initialization, respectively, since commit
10a41c2 ("android: ion: add fixups to ion exynos extension").

Uncached buffers need no CPU cache maintenance for CPU accesses
because dma_buf_kmap() returns a kernel address with the same
cacheability attribute as the buffer.
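This is why both ops can return early for uncached buffers, as the
hunks below do; in the mainline driver ion_buffer_cached() is simply a
flag test:

    /* uncached mapping: nothing for the CPU caches to maintain */
    if (!ion_buffer_cached(buffer))    /* tests ION_FLAG_CACHED */
            return 0;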

Change-Id: I6656f22b696ae96de9f2766751970b25f17572f6
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/ion.c

index b726485f5cc3e7b3b56db703b5235cefacb6aad0..a30dcbb1ddad0dc86296f89393cfaa9fb5c21808 100644 (file)
@@ -368,12 +368,23 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                mutex_unlock(&buffer->lock);
        }
 
-       mutex_lock(&dmabuf->lock);
-       list_for_each_entry(att, &dmabuf->attachments, node) {
-               struct sg_table *table = att->priv;
+       if (!ion_buffer_cached(buffer))
+               return 0;
 
-               dma_sync_sg_for_cpu(att->dev, table->sgl, table->nents,
-                                   direction);
+       mutex_lock(&dmabuf->lock);
+       if (IS_ENABLED(CONFIG_ION_EXYNOS)) {
+               if (!list_empty(&dmabuf->attachments))
+                       dma_sync_sg_for_cpu(buffer->dev->dev.this_device,
+                                           buffer->sg_table->sgl,
+                                           buffer->sg_table->orig_nents,
+                                           direction);
+       } else {
+               list_for_each_entry(att, &dmabuf->attachments, node) {
+                       struct sg_table *table = att->priv;
+
+                       dma_sync_sg_for_cpu(att->dev, table->sgl, table->nents,
+                                           direction);
+               }
        }
        mutex_unlock(&dmabuf->lock);
 
@@ -392,12 +403,23 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                mutex_unlock(&buffer->lock);
        }
 
-       mutex_lock(&dmabuf->lock);
-       list_for_each_entry(att, &dmabuf->attachments, node) {
-               struct sg_table *table = att->priv;
+       if (!ion_buffer_cached(buffer))
+               return 0;
 
-               dma_sync_sg_for_device(att->dev, table->sgl, table->nents,
-                                      direction);
+       mutex_lock(&dmabuf->lock);
+       if (IS_ENABLED(CONFIG_ION_EXYNOS)) {
+               if (!list_empty(&dmabuf->attachments))
+                       dma_sync_sg_for_device(buffer->dev->dev.this_device,
+                                              buffer->sg_table->sgl,
+                                              buffer->sg_table->orig_nents,
+                                              direction);
+       } else {
+               list_for_each_entry(att, &dmabuf->attachments, node) {
+                       struct sg_table *table = att->priv;
+
+                       dma_sync_sg_for_device(att->dev, table->sgl,
+                                              table->nents, direction);
+               }
        }
        mutex_unlock(&dmabuf->lock);
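
For context, a dma-buf importer is expected to bracket its CPU accesses
with these ops; a minimal sketch against the 4.14-era dma-buf API
(cpu_read_first_page() is a made-up caller, error paths trimmed):

    #include <linux/dma-buf.h>
    #include <linux/dma-direction.h>

    static int cpu_read_first_page(struct dma_buf *dmabuf)
    {
            void *vaddr;
            int ret;

            /* flush/invalidate caches so the CPU sees device writes */
            ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
            if (ret)
                    return ret;

            vaddr = dma_buf_kmap(dmabuf, 0);        /* map page 0 */
            if (vaddr) {
                    /* ... read buffer contents through vaddr ... */
                    dma_buf_kunmap(dmabuf, 0, vaddr);
            }

            /* hand the buffer back to the device */
            return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
    }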