[RAMEN9610-20778][9610] drivers: gpu: check ion buffer is cached before flush v4
author SeYeong Byeon <sy.byeon@samsung.com>
Thu, 10 Oct 2019 09:21:12 +0000 (18:21 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:23:37 +0000 (20:23 +0300)
With the LEGACY_COMPAT option enabled,
only flush an ion buffer if it is a cached buffer.

Update 19.10.17:
Prevent spurious warning logs when a non-cached buffer is
intentionally left unflushed.

Change-Id: Icce58c5c39c8898f5804d47ea1bf6e83776ab02a
Signed-off-by: SeYeong Byeon <sy.byeon@samsung.com>
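
For reference, a minimal sketch of the gating this patch adds around the dma-buf sync in kbase_mem_do_sync_imported(). It is illustrative only: ion_cached_needsync_dmabuf()/ion_cached_dmabuf() are the Exynos ION helpers used in the patch and dma_buf_end_cpu_access() is the standard kernel API, but the standalone helper name below is hypothetical.

/* Illustrative sketch only -- the real change lives in
 * kbase_mem_do_sync_imported() in mali_kbase_mem_linux.c.
 * sync_imported_dmabuf_to_gpu() is a hypothetical helper name. */
#include <linux/dma-buf.h>
#include <linux/version.h>
#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
#include <linux/ion.h>
#else
#include <linux/ion_exynos.h>
#endif

static int sync_imported_dmabuf_to_gpu(struct dma_buf *dma_buf)
{
        /* Non-cached ION buffers need no cache maintenance: skip the
         * flush and report success, so the caller does not warn about
         * a sync that was intentionally not performed. */
#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
        if (!ion_cached_needsync_dmabuf(dma_buf))
                return 0;
#else
        if (!ion_cached_dmabuf(dma_buf))
                return 0;
#endif

        /* Cached buffer: flush the CPU caches so the GPU observes the
         * latest data. */
        return dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
}

This mirrors the companion change in the diff below: ret now defaults to 0 and -EINVAL is returned only for genuinely unsupported cases, so skipping the flush for a non-cached buffer is reported as success rather than an error.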
drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c

index d2019fb8d917d78fe6235069db17b8da1b4e8d48..7c3800e4ed39014e36a569bc24ee9b131e3306a8 100644
 #include <linux/cache.h>
 #include <linux/memory_group_manager.h>
 
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+#include <linux/ion.h>
+#else
+#include <linux/ion_exynos.h>
+#endif
+
 #include <mali_kbase.h>
 #include <mali_kbase_mem_linux.h>
 #include <mali_kbase_tracepoints.h>
@@ -976,7 +982,7 @@ out:
 int kbase_mem_do_sync_imported(struct kbase_context *kctx,
                struct kbase_va_region *reg, enum kbase_sync_type sync_fn)
 {
-       int ret = -EINVAL;
+       int ret = 0;
 #ifdef CONFIG_DMA_SHARED_BUFFER
        struct dma_buf *dma_buf;
        enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -990,7 +996,7 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
 
        /* Currently only handle dma-bufs */
        if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)
-               return ret;
+               return -EINVAL;
        /*
         * Attempting to sync with CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND
         * enabled can expose us to a Linux Kernel issue between v4.6 and
@@ -1003,7 +1009,7 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
         */
        if (IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND) &&
            !reg->gpu_alloc->imported.umm.current_mapping_usage_count)
-               return ret;
+               return -EINVAL;
 
        dma_buf = reg->gpu_alloc->imported.umm.dma_buf;
 
@@ -1012,6 +1018,12 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
                dev_dbg(kctx->kbdev->dev,
                        "Syncing imported buffer at GPU VA %llx to GPU\n",
                        reg->start_pfn);
+
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+               if (ion_cached_needsync_dmabuf(dma_buf)) {
+#else
+               if (ion_cached_dmabuf(dma_buf)) {
+#endif
 #ifdef KBASE_MEM_ION_SYNC_WORKAROUND
                if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
                        struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
@@ -1036,11 +1048,18 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
                                dir);
 #endif
 #endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+               }
                break;
        case KBASE_SYNC_TO_CPU:
                dev_dbg(kctx->kbdev->dev,
                        "Syncing imported buffer at GPU VA %llx to CPU\n",
                        reg->start_pfn);
+
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+               if (ion_cached_needsync_dmabuf(dma_buf)) {
+#else
+               if (ion_cached_dmabuf(dma_buf)) {
+#endif
 #ifdef KBASE_MEM_ION_SYNC_WORKAROUND
                if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
                        struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
@@ -1057,6 +1076,7 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
 #endif
                                dir);
 #endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+               }
                break;
        };