From ebc66647782d78b96607a94611575f28f5aca913 Mon Sep 17 00:00:00 2001
From: SeYeong Byeon
Date: Thu, 10 Oct 2019 18:21:12 +0900
Subject: [PATCH] [RAMEN9610-20778][9610] drivers: gpu: check ion buffer is
 cached before flush v4

With the LEGACY_COMPAT option enabled, only flush an ion buffer if it
is a cached buffer.

Update 19.10.17: prevent warning logs from being wrongly printed when a
non-cached buffer is intentionally not flushed.

Change-Id: Icce58c5c39c8898f5804d47ea1bf6e83776ab02a
Signed-off-by: SeYeong Byeon
---
 .../gpu/arm/b_r19p0/mali_kbase_mem_linux.c    | 26 ++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c b/drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c
index d2019fb8d917..7c3800e4ed39 100644
--- a/drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c
+++ b/drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c
@@ -46,6 +46,12 @@
 #include
 #include
 
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+#include
+#else
+#include
+#endif
+
 #include
 #include
 #include
@@ -976,7 +982,7 @@ out:
 int kbase_mem_do_sync_imported(struct kbase_context *kctx,
 		struct kbase_va_region *reg, enum kbase_sync_type sync_fn)
 {
-	int ret = -EINVAL;
+	int ret = 0;
 #ifdef CONFIG_DMA_SHARED_BUFFER
 	struct dma_buf *dma_buf;
 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -990,7 +996,7 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
 
 	/* Currently only handle dma-bufs */
 	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)
-		return ret;
+		return -EINVAL;
 	/*
 	 * Attempting to sync with CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND
 	 * enabled can expose us to a Linux Kernel issue between v4.6 and
@@ -1003,7 +1009,7 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
 	 */
 	if (IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND) &&
 			!reg->gpu_alloc->imported.umm.current_mapping_usage_count)
-		return ret;
+		return -EINVAL;
 
 	dma_buf = reg->gpu_alloc->imported.umm.dma_buf;
 
@@ -1012,6 +1018,12 @@
 		dev_dbg(kctx->kbdev->dev,
 			"Syncing imported buffer at GPU VA %llx to GPU\n",
 			reg->start_pfn);
+
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+		if (ion_cached_needsync_dmabuf(dma_buf)) {
+#else
+		if (ion_cached_dmabuf(dma_buf)) {
+#endif
 #ifdef KBASE_MEM_ION_SYNC_WORKAROUND
 		if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
 			struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
@@ -1036,11 +1048,18 @@
 				dir);
 #endif
 #endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+		}
 		break;
 	case KBASE_SYNC_TO_CPU:
 		dev_dbg(kctx->kbdev->dev,
 			"Syncing imported buffer at GPU VA %llx to CPU\n",
 			reg->start_pfn);
+
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+		if (ion_cached_needsync_dmabuf(dma_buf)) {
+#else
+		if (ion_cached_dmabuf(dma_buf)) {
+#endif
 #ifdef KBASE_MEM_ION_SYNC_WORKAROUND
 		if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
 			struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
@@ -1057,6 +1076,7 @@
 #endif
 			dir);
 #endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+		}
 		break;
 	};
 
-- 
2.20.1
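
For reference, the net control flow this patch gives kbase_mem_do_sync_imported()
can be sketched as follows. This is a minimal illustration under stated
assumptions, not the driver code itself: sync_imported_dmabuf() is a
hypothetical helper name, the KBASE_MEM_ION_SYNC_WORKAROUND attachment path is
elided, the patch's if (cached) { ... } wrapping is folded into a single early
return, and the v4.6+ signatures of dma_buf_begin_cpu_access() and
dma_buf_end_cpu_access() are assumed. ion_cached_needsync_dmabuf() and
ion_cached_dmabuf() are the Exynos ion queries the patch itself calls, declared
in the version-dependent ion headers the patch includes.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/version.h>

/*
 * Hypothetical helper sketching the sync path after this patch is
 * applied (illustration only; not part of the patch).
 */
static int sync_imported_dmabuf(struct dma_buf *dma_buf,
				enum kbase_sync_type sync_fn)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;

#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
	bool cached = ion_cached_needsync_dmabuf(dma_buf);
#else
	bool cached = ion_cached_dmabuf(dma_buf);
#endif

	/*
	 * A non-cached buffer needs no cache maintenance. Returning 0
	 * (success) rather than an error keeps callers from printing a
	 * warning for a buffer that was intentionally not flushed.
	 */
	if (!cached)
		return 0;

	switch (sync_fn) {
	case KBASE_SYNC_TO_DEVICE:
		/* Clean CPU caches so the GPU sees CPU-side writes. */
		return dma_buf_end_cpu_access(dma_buf, dir);
	case KBASE_SYNC_TO_CPU:
		/* Invalidate CPU caches so the CPU sees GPU-side writes. */
		return dma_buf_begin_cpu_access(dma_buf, dir);
	default:
		return -EINVAL;
	}
}

The early "return 0" above mirrors the hunk changing "int ret = -EINVAL;" to
"int ret = 0;": once the sync calls are gated on the cached check, a non-cached
buffer falls through with ret still 0, so skipping the flush is reported as
success instead of an error. That is what silences the spurious warnings
described in the commit message's 19.10.17 update.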