From 70c9d8317b234b13492af2d481dfc6dea95a7d8e Mon Sep 17 00:00:00 2001
From: Cho KyongHo <pullip.cho@samsung.com>
Date: Mon, 12 Feb 2018 11:19:32 +0900
Subject: [PATCH] android: ion: add ion_iovmm_[un]map()

iovmm_map() provided by exynos-iovmm is time-consuming. If the same
buffer is iovm mapped to a device, iovmm_map() does the same thing
with much time. iovmm_map() is not able to avoid this repeat because
it has no information to identify a buffer. Thus, ION provides the
avoidance of repeating mapping the same buffer.

Change-Id: I4c62f615dca6dfb5f8d2fe366ccc0a89af1d485e
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
---
 drivers/staging/android/ion/ion.c        |   1 +
 drivers/staging/android/ion/ion.h        |   1 +
 drivers/staging/android/ion/ion_exynos.c | 119 +++++++++++++++++++++++
 include/linux/ion_exynos.h               |  20 ++++
 4 files changed, 141 insertions(+)

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 1939579677d2..4e5db84dec74 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -115,6 +115,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 	buffer->dev = dev;
 	buffer->size = len;
+	INIT_LIST_HEAD(&buffer->iovas);
 	mutex_init(&buffer->lock);
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 9f5599b5e3bb..d962dd3d1afe 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -85,6 +85,7 @@ struct ion_buffer {
 	int kmap_cnt;
 	void *vaddr;
 	struct sg_table *sg_table;
+	struct list_head iovas;
 };
 
 void ion_buffer_destroy(struct ion_buffer *buffer);
diff --git a/drivers/staging/android/ion/ion_exynos.c b/drivers/staging/android/ion/ion_exynos.c
index b1cd1bf60177..2f067d1ceed5 100644
--- a/drivers/staging/android/ion/ion_exynos.c
+++ b/drivers/staging/android/ion/ion_exynos.c
@@ -14,6 +14,10 @@
  *
  */
 
+#include <linux/dma-buf.h>
+#include <linux/exynos_iovmm.h>
+#include <linux/slab.h>
+
 #include "ion.h"
 #include "ion_exynos.h"
 
@@ -29,3 +33,118 @@ struct dma_buf
*ion_alloc_dmabuf(const char *heap_name,
 
 	return __ion_alloc(len, 1 << heap->id, flags);
 }
+
+struct ion_iovm_map {
+	struct list_head list;
+	struct device *dev;
+	struct iommu_domain *domain;
+	dma_addr_t iova;
+	atomic_t mapcnt;
+	int prop;
+};
+
+static struct ion_iovm_map *ion_buffer_iova_create(struct ion_buffer *buffer,
+						   struct device *dev,
+						   enum dma_data_direction dir,
+						   int prop)
+{
+	struct ion_iovm_map *iovm_map;
+
+	iovm_map = kzalloc(sizeof(*iovm_map), GFP_KERNEL);
+	if (!iovm_map)
+		return ERR_PTR(-ENOMEM);
+
+	iovm_map->iova = iovmm_map(dev, buffer->sg_table->sgl,
+				   0, buffer->size, dir, prop);
+	if (IS_ERR_VALUE(iovm_map->iova)) {
+		int ret = (int)iovm_map->iova;
+
+		kfree(iovm_map);
+		dev_err(dev, "%s: failed to allocate iova (err %d)\n",
+			__func__, ret);
+		return ERR_PTR(ret);
+	}
+
+	iovm_map->dev = dev;
+	iovm_map->domain = get_domain_from_dev(dev);
+	iovm_map->prop = prop;
+
+	atomic_inc(&iovm_map->mapcnt);
+
+	return iovm_map;
+}
+
+dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
+			 off_t offset, size_t size,
+			 enum dma_data_direction direction, int prop)
+{
+	struct ion_buffer *buffer = attachment->dmabuf->priv;
+	struct ion_iovm_map *iovm_map;
+	struct iommu_domain *domain;
+
+	BUG_ON(attachment->dmabuf->ops != &ion_dma_buf_ops);
+
+	if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
+	    (buffer->flags & ION_FLAG_PROTECTED)) {
+		struct ion_buffer_prot_info *prot = buffer->priv_virt;
+
+		return prot->dma_addr;
+	}
+
+	domain = get_domain_from_dev(attachment->dev);
+	if (!domain) {
+		dev_err(attachment->dev, "%s: no iommu domain\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&buffer->lock);
+
+	if (!ion_buffer_cached(buffer))
+		prop &= ~IOMMU_CACHE;
+
+	list_for_each_entry(iovm_map, &buffer->iovas, list) {
+		if ((domain == iovm_map->domain) && (prop == iovm_map->prop)) {
+			mutex_unlock(&buffer->lock);
+			atomic_inc(&iovm_map->mapcnt);
+			return iovm_map->iova;
+		}
+	}
+
+	iovm_map = ion_buffer_iova_create(buffer,
attachment->dev,
+					  direction, prop);
+	if (IS_ERR(iovm_map)) {
+		mutex_unlock(&buffer->lock);
+		return PTR_ERR(iovm_map);
+	}
+
+	list_add_tail(&iovm_map->list, &buffer->iovas);
+
+	mutex_unlock(&buffer->lock);
+
+	return iovm_map->iova;
+}
+
+/* unmapping is deferred until buffer is freed for performance */
+void ion_iovmm_unmap(struct dma_buf_attachment *attachment, dma_addr_t iova)
+{
+	struct ion_buffer *buffer = attachment->dmabuf->priv;
+	struct ion_iovm_map *iovm_map;
+	struct iommu_domain *domain;
+
+	domain = get_domain_from_dev(attachment->dev);
+	if (!domain) {
+		dev_err(attachment->dev, "%s: no iommu domain\n", __func__);
+		return;
+	}
+
+	mutex_lock(&buffer->lock);
+	list_for_each_entry(iovm_map, &buffer->iovas, list) {
+		if ((domain == iovm_map->domain) && (iova == iovm_map->iova)) {
+			mutex_unlock(&buffer->lock);
+			atomic_dec(&iovm_map->mapcnt);
+			return;
+		}
+	}
+	mutex_unlock(&buffer->lock);
+	WARN(1, "iova %pad not found for %s\n", &iova, dev_name(attachment->dev));
+}
diff --git a/include/linux/ion_exynos.h b/include/linux/ion_exynos.h
index b0e3da259aa1..b43fb18147bb 100644
--- a/include/linux/ion_exynos.h
+++ b/include/linux/ion_exynos.h
@@ -16,6 +16,10 @@
 #ifndef __LINUX_ION_EXYNOS_H__
 #define __LINUX_ION_EXYNOS_H__
 
+#include <linux/dma-direction.h>
+
+struct dma_buf_attachment;
+
 #define ION_FLAG_CACHED 1
 #define ION_FLAG_PROTECTED 16
 
@@ -30,4 +34,20 @@ static inline struct dma_buf *ion_alloc_dmabuf(const char *heap_name,
 }
 #endif
 
+#if defined(CONFIG_EXYNOS_IOVMM) && defined(CONFIG_ION_EXYNOS)
+dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
+			 off_t offset, size_t size,
+			 enum dma_data_direction direction, int prop);
+void ion_iovmm_unmap(struct dma_buf_attachment *attachment, dma_addr_t iova);
+#else
+static inline dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
+				       off_t offset, size_t size,
+				       enum dma_data_direction direction,
+				       int prop)
+{
+	return -ENODEV;
+}
+#define ion_iovmm_unmap(attachment, iova) do { } while (0)
+#endif
+
 #endif /* __LINUX_ION_EXYNOS_H__ */
-- 2.20.1