android: ion: do not map in dma_buf_map_attachment() for Exynos
author Cho KyongHo <pullip.cho@samsung.com>
Thu, 22 Feb 2018 06:40:48 +0000 (15:40 +0900)
committer Sangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:24 +0000 (19:45 +0900)
The dma_buf ops of ION do two unnecessary pieces of work: one is
duplicating the sg_table in dma_buf_attach(), and the other is the
dma_map_sg() call in dma_buf_map_attachment(). The duplication of the
sg_table is in fact necessary in general, because dma_map_sg() updates
the dma_address fields of the scatter-gather list in the sg_table with
the DMA address for the given device. That is true for devices behind
an IOMMU that provides dma-mapping ops. However, our IOMMU driver, the
System MMU driver, does not provide dma-mapping ops. Therefore, the
DMA addresses of the same physical memory location do not differ
between devices on Exynos SoCs.

ion_buffer_create() has called dma_map_sg() to initialize the
dma_address fields of the sg_table since the following commit:
'commit 8215d29 ("android: ion: add fixups to ion exynos extension")'

Now the sg_table in ion_buffer can be used for all devices, so we can
avoid duplicating the sg_table on every dma_buf_attach() call and can
replace dma_map_sg() with dma_sync_sg_for_device() in
dma_buf_map_attachment(). ion_dma_buf_attach() and ion_dma_buf_detach()
are no longer required, as they do nothing once the sg_table
duplication is removed.

For maintenance reasons, we do not remove the now-unused dma_buf ops
of ION in ion.c. Instead, we add our own implementations of the
map_dma_buf() and unmap_dma_buf() ops to the dma_buf ops of ION when
CONFIG_ION_EXYNOS is enabled.
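
For reference, the importer-side dma-buf flow is unchanged. Below is a
minimal sketch of that flow (the importing device 'dev' and the dma-buf
fd 'fd' are hypothetical placeholders, and error handling is reduced to
the essentials):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

/* Hypothetical importer: attach, map, use, then tear down. */
static int example_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf = dma_buf_get(fd);
	struct dma_buf_attachment *att;
	struct sg_table *sgt;
	int ret = 0;

	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* No per-attachment sg_table duplication happens here any more. */
	att = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(att)) {
		ret = PTR_ERR(att);
		goto put_buf;
	}

	/*
	 * With CONFIG_ION_EXYNOS this returns buffer->sg_table directly
	 * and only calls dma_sync_sg_for_device() for cached buffers,
	 * instead of duplicating the table and calling dma_map_sg().
	 */
	sgt = dma_buf_map_attachment(att, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program the device with sgt->sgl ... */

	dma_buf_unmap_attachment(att, sgt, DMA_TO_DEVICE);
detach:
	dma_buf_detach(dmabuf, att);
put_buf:
	dma_buf_put(dmabuf);
	return ret;
}

The sketch only illustrates that importers keep using the standard
dma-buf API; whether cache maintenance or a full dma_map_sg() happens
underneath is selected by CONFIG_ION_EXYNOS.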

Change-Id: I1764a7eb62e948f074b596e316fe3f48ed3f8662
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_exynos.c
drivers/staging/android/ion/ion_exynos.h

index 2ff694890d377a8ef0514172571f5567774b2e98..6f71a00715fa96d3789093d748053c9c975597d5 100644
@@ -182,6 +182,7 @@ static void ion_buffer_kmap_put(struct ion_buffer *buffer)
        }
 }
 
+#ifndef CONFIG_ION_EXYNOS
 static struct sg_table *dup_sg_table(struct sg_table *table)
 {
        struct sg_table *new_table;
@@ -263,6 +264,7 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
        dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
                           direction, attrs);
 }
+#endif /* !CONFIG_ION_EXYNOS */
 
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
@@ -398,12 +400,17 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 
 const struct dma_buf_ops ion_dma_buf_ops = {
+#ifdef CONFIG_ION_EXYNOS
+       .map_dma_buf = ion_exynos_map_dma_buf,
+       .unmap_dma_buf = ion_exynos_unmap_dma_buf,
+#else
+       .attach = ion_dma_buf_attach,
+       .detach = ion_dma_buf_detatch,
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
+#endif
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
-       .attach = ion_dma_buf_attach,
-       .detach = ion_dma_buf_detatch,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .map_atomic = ion_dma_buf_kmap,
index 0542e04b6fdb55e27ccfb8297270c91490459649..fafb9dae1500c1055dadbcf3e23040e60c6c79ef 100644
@@ -207,3 +207,26 @@ void exynos_ion_free_fixup(struct ion_buffer *buffer)
                           table->orig_nents, DMA_TO_DEVICE,
                           DMA_ATTR_SKIP_CPU_SYNC);
 }
+
+struct sg_table *ion_exynos_map_dma_buf(struct dma_buf_attachment *attachment,
+                                       enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = attachment->dmabuf->priv;
+
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_device(attachment->dev, buffer->sg_table->sgl,
+                                      buffer->sg_table->nents, direction);
+
+       return buffer->sg_table;
+}
+
+void ion_exynos_unmap_dma_buf(struct dma_buf_attachment *attachment,
+                             struct sg_table *table,
+                             enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = attachment->dmabuf->priv;
+
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_cpu(attachment->dev, table->sgl,
+                                   table->nents, direction);
+}
index f1abf8924109b55e631de86cf197a400b1f68bc9..9a6209810c82ae475cc0fb35b531df15ee28a665 100644
 #ifndef _ION_EXYNOS_H_
 #define _ION_EXYNOS_H_
 
+#include <linux/dma-direction.h>
 struct cma;
 struct dma_buf;
 struct ion_heap;
 struct ion_platform_heap;
 struct ion_device;
 struct ion_buffer;
+struct dma_buf_attachment;
 
 /**
  * struct ion_buffer_prot_info - buffer protection information
@@ -73,6 +75,11 @@ void ion_buffer_unprotect(void *priv);
 void exynos_ion_fixup(struct ion_device *idev);
 int exynos_ion_alloc_fixup(struct ion_device *idev, struct ion_buffer *buffer);
 void exynos_ion_free_fixup(struct ion_buffer *buffer);
+struct sg_table *ion_exynos_map_dma_buf(struct dma_buf_attachment *attachment,
+                                       enum dma_data_direction direction);
+void ion_exynos_unmap_dma_buf(struct dma_buf_attachment *attachment,
+                             struct sg_table *table,
+                             enum dma_data_direction direction);
 
 #else
 static inline void *ion_buffer_protect_single(unsigned int protection_id,
@@ -91,6 +98,14 @@ static inline int exynos_ion_alloc_fixup(struct ion_device *idev,
 }
 
 #define exynos_ion_free_fixup(buffer) do { } while (0)
+
+static inline struct sg_table *ion_exynos_map_dma_buf(struct dma_buf_attachment *att,
+                                                      enum dma_data_direction dir)
+{
+       return ERR_PTR(-ENODEV);
+}
+
+#define ion_exynos_unmap_dma_buf(attachment, table, direction) do { } while (0)
 #endif
 
 extern const struct dma_buf_ops ion_dma_buf_ops;