From: Cho KyongHo
Date: Fri, 2 Mar 2018 14:03:48 +0000 (+0900)
Subject: android: ion: add dmabuf cpu access ops for exynos
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=6f51c5003d3dab4291d6b7276d98dcb99ac00024;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

android: ion: add dmabuf cpu access ops for exynos

.begin_cpu_access and .end_cpu_access of the dmabuf ops provided by ION
need a scatter-gather list because they rely on the dma-mapping API.
Since commit 8dbbf7b ("android: ion: do not map in dma_map_attach() for
Exynos"), ion_dma_buf_begin_cpu_access() and ion_dma_buf_end_cpu_access()
lose the scatter-gather list that used to be stored in the dma-buf
attachment, because that commit no longer stores it there.

Instead, the dma-mapping API can be used with ion_buffer.sg_table and the
miscdevice of ion_device, because they are initialized at buffer
allocation and at ION initialization, respectively, since commit 10a41c2
("android: ion: add fixups to ion exynos extension").

Uncached buffers need no CPU cache maintenance for CPU accesses because
dma_buf_kmap() returns a kernel address mapped with the cacheability
attribute of the buffer.

Change-Id: I6656f22b696ae96de9f2766751970b25f17572f6
Signed-off-by: Cho KyongHo
---

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index b726485f5cc3..a30dcbb1ddad 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -368,12 +368,23 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		mutex_unlock(&buffer->lock);
 	}
 
-	mutex_lock(&dmabuf->lock);
-	list_for_each_entry(att, &dmabuf->attachments, node) {
-		struct sg_table *table = att->priv;
+	if (!ion_buffer_cached(buffer))
+		return 0;
 
-		dma_sync_sg_for_cpu(att->dev, table->sgl, table->nents,
-				    direction);
+	mutex_lock(&dmabuf->lock);
+	if (IS_ENABLED(CONFIG_ION_EXYNOS)) {
+		if (!list_empty(&dmabuf->attachments))
+			dma_sync_sg_for_cpu(buffer->dev->dev.this_device,
+					    buffer->sg_table->sgl,
+					    buffer->sg_table->orig_nents,
+					    direction);
+	} else {
+		list_for_each_entry(att, &dmabuf->attachments, node) {
+			struct sg_table *table = att->priv;
+
+			dma_sync_sg_for_cpu(att->dev, table->sgl, table->nents,
+					    direction);
+		}
 	}
 	mutex_unlock(&dmabuf->lock);
 
@@ -392,12 +403,23 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 		mutex_unlock(&buffer->lock);
 	}
 
-	mutex_lock(&dmabuf->lock);
-	list_for_each_entry(att, &dmabuf->attachments, node) {
-		struct sg_table *table = att->priv;
+	if (!ion_buffer_cached(buffer))
+		return 0;
 
-		dma_sync_sg_for_device(att->dev, table->sgl, table->nents,
-				       direction);
+	mutex_lock(&dmabuf->lock);
+	if (IS_ENABLED(CONFIG_ION_EXYNOS)) {
+		if (!list_empty(&dmabuf->attachments))
+			dma_sync_sg_for_device(buffer->dev->dev.this_device,
+					       buffer->sg_table->sgl,
+					       buffer->sg_table->orig_nents,
+					       direction);
+	} else {
+		list_for_each_entry(att, &dmabuf->attachments, node) {
+			struct sg_table *table = att->priv;
+
+			dma_sync_sg_for_device(att->dev, table->sgl,
+					       table->nents, direction);
+		}
 	}
 	mutex_unlock(&dmabuf->lock);
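
Editor's note: the ops changed above are reached through the dma-buf CPU access API
rather than called directly. Below is a minimal sketch of a kernel-side caller; the
function name ion_cpu_read_example and its parameters are hypothetical and only
illustrate the begin/kmap/kunmap/end sequence that ends up invoking
ion_dma_buf_begin_cpu_access() and ion_dma_buf_end_cpu_access() on this kernel
(which still provides dma_buf_kmap()/dma_buf_kunmap()).

    #include <linux/dma-buf.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    /*
     * Hypothetical example: read the first 'len' bytes of an ION dma-buf on
     * the CPU. The caller must ensure 'len' does not exceed the first page.
     */
    static int ion_cpu_read_example(struct dma_buf *dmabuf, u8 *out, size_t len)
    {
    	void *vaddr;
    	int ret;

    	/*
    	 * Calls ion_dma_buf_begin_cpu_access(): with this patch, cached
    	 * buffers are synced via ion_buffer.sg_table and the ION miscdevice,
    	 * while uncached buffers return early with no cache maintenance.
    	 */
    	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
    	if (ret)
    		return ret;

    	/* The kernel mapping carries the buffer's cacheability attribute. */
    	vaddr = dma_buf_kmap(dmabuf, 0);
    	if (!vaddr) {
    		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
    		return -ENOMEM;
    	}

    	memcpy(out, vaddr, len);
    	dma_buf_kunmap(dmabuf, 0, vaddr);

    	/* Calls ion_dma_buf_end_cpu_access() (dma_sync_sg_for_device path). */
    	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
    }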