From: Cho KyongHo
Date: Thu, 11 Jan 2018 00:51:20 +0000 (+0900)
Subject: media: vb2: add support for iovmm in vb2-dma-sg
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=2add19dd0c3e600d1ccd7eb3c911c00ff6ce0e02;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

media: vb2: add support for iovmm in vb2-dma-sg

Address space management for DMA masters behind the System MMU in
Exynos SoCs is not implemented in dma-mapping but in iovmm, which
only provides address space management for such masters. Therefore
we should call iovmm_map() and iovmm_unmap() explicitly, even though
videobuf2-dma-sg already calls dma_map_sg() for mapping.

Client v4l2 drivers are intended to get the DMA address from the
scatter-gather list returned by vb2_dma_sg_plane_desc(), but
iovmm_map() leaves that list untouched because the DMA addresses in
the scatter-gather list are also used for cache maintenance.
Therefore, the DMA address returned by iovmm_map() must be
maintained separately. We also provide vb2_dma_sg_plane_dma_addr()
to find the DMA address of a plane.

Change-Id: I4773d342716431d7ea889bb5e11ac3512d8193ba
Signed-off-by: Cho KyongHo
---
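Note for reviewers (illustration only, not part of the commit): a minimal
sketch of how a client v4l2 driver is expected to consume the new helper.
The device struct, register offset and buf_queue wiring below are
hypothetical; vb2_dma_sg_plane_dma_addr() is added by this patch, while
vb2_get_drv_priv() and vb2_dma_sg_plane_desc() are existing vb2 API.

#include <linux/io.h>
#include <linux/kernel.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>

/* Hypothetical capture device; only the vb2 calls are existing API. */
struct my_dev {
	void __iomem *regs;
};

#define MY_DMA_ADDR_REG	0x40	/* hypothetical register offset */

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	/*
	 * Program the IOVA from iovmm_map() into the hardware, not the
	 * addresses in the sg list from vb2_dma_sg_plane_desc(); those
	 * stay physical and are reserved for cache maintenance.
	 */
	dma_addr_t iova = vb2_dma_sg_plane_dma_addr(vb, 0);

	writel(lower_32_bits(iova), dev->regs + MY_DMA_ADDR_REG);
}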
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 6808231a6bdc..1b1a45224b4c 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -18,6 +18,8 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+#include <linux/exynos_iovmm.h>
+
 #include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-memops.h>
 #include <media/videobuf2-dma-sg.h>
@@ -51,6 +53,12 @@ struct vb2_dma_sg_buf {
 	struct vb2_vmarea_handler	handler;
 
 	struct dma_buf_attachment	*db_attach;
+
+	/*
+	 * Our IO address space is not managed by dma-mapping. Therefore
+	 * scatterlist.dma_address should not be corrupted by the IO address
+	 * returned by iovmm_map() because it is used by cache maintenance.
+	 */
+	dma_addr_t			iova;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
@@ -104,6 +112,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	int ioprot = IOMMU_READ | IOMMU_WRITE;
 
 	if (WARN_ON(!dev))
 		return ERR_PTR(-EINVAL);
@@ -138,13 +147,13 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	buf->dev = get_device(dev);
 
 	sgt = &buf->sg_table;
-	/*
-	 * No need to sync to the device, this will happen later when the
-	 * prepare() memop is called.
-	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+
+	if (device_get_dma_attr(dev) == DEV_DMA_COHERENT)
+		ioprot |= IOMMU_CACHE;
+
+	buf->iova = iovmm_map(buf->dev, sgt->sgl, 0, size,
+			      DMA_BIDIRECTIONAL, ioprot);
+	if (IS_ERR_VALUE(buf->iova))
 		goto fail_map;
 
 	buf->handler.refcount = &buf->refcount;
@@ -174,14 +183,12 @@ fail_pages_array_alloc:
 static void vb2_dma_sg_put(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
-	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
 
 	if (refcount_dec_and_test(&buf->refcount)) {
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		iovmm_unmap(buf->dev, buf->iova);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -225,6 +232,9 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	struct vb2_dma_sg_buf *buf;
 	struct sg_table *sgt;
 	struct frame_vector *vec;
+	struct scatterlist *s;
+	int i;
+	int ioprot = IOMMU_READ | IOMMU_WRITE;
 
 	if (WARN_ON(!dev))
 		return ERR_PTR(-EINVAL);
@@ -255,13 +265,18 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 		goto userptr_fail_sgtable;
 
 	sgt = &buf->sg_table;
-	/*
-	 * No need to sync to the device, this will happen later when the
-	 * prepare() memop is called.
-	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+
+	/* Just fixup of scatter-gather list not initialized by dma-mapping. */
+	sgt->nents = sgt->orig_nents;
+	for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+		s->dma_address = sg_phys(s);
+
+	if (device_get_dma_attr(dev) == DEV_DMA_COHERENT)
+		ioprot |= IOMMU_CACHE;
+
+	buf->iova = iovmm_map(buf->dev, sgt->sgl, 0, size,
+			      DMA_BIDIRECTIONAL, ioprot);
+	if (IS_ERR_VALUE(buf->iova))
 		goto userptr_fail_map;
 
 	return buf;
@@ -282,13 +297,10 @@ userptr_fail_pfnvec:
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
-	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-			   DMA_ATTR_SKIP_CPU_SYNC);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
@@ -546,6 +558,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
 {
 	struct vb2_dma_sg_buf *buf = mem_priv;
 	struct sg_table *sgt;
+	int ioprot = IOMMU_READ | IOMMU_WRITE;
 
 	if (WARN_ON(!buf->db_attach)) {
 		pr_err("trying to pin a non attached buffer\n");
@@ -564,6 +577,20 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
 		return -EINVAL;
 	}
 
+	if ((buf->iova == 0) || IS_ERR_VALUE(buf->iova)) {
+		if (device_get_dma_attr(buf->dev) == DEV_DMA_COHERENT)
+			ioprot |= IOMMU_CACHE;
+
+		buf->iova = iovmm_map(buf->dev, sgt->sgl, 0, buf->size,
+				      DMA_BIDIRECTIONAL, ioprot);
+		if (IS_ERR_VALUE(buf->iova)) {
+			dma_buf_unmap_attachment(buf->db_attach,
+						 sgt, buf->dma_dir);
+			pr_err("Error from iovmm_map()=%pad\n", &buf->iova);
+			return (int)buf->iova;
+		}
+	}
+
 	buf->dma_sgt = sgt;
 	buf->vaddr = NULL;
 
@@ -602,6 +629,8 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
 	if (WARN_ON(buf->dma_sgt))
 		vb2_dma_sg_unmap_dmabuf(buf);
 
+	iovmm_unmap(buf->dev, buf->iova);
+
 	/* detach this attachment */
 	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
 	kfree(buf);
 }
@@ -632,6 +661,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
 		return dba;
 	}
 
+	buf->iova = 0;
 	buf->dma_dir = dma_dir;
 	buf->size = size;
 	buf->db_attach = dba;
@@ -646,6 +676,14 @@ static void *vb2_dma_sg_cookie(void *buf_priv)
 	return buf->dma_sgt;
 }
 
+dma_addr_t vb2_dma_sg_plane_dma_addr(struct vb2_buffer *vb,
+				     unsigned int plane_no)
+{
+	struct vb2_dma_sg_buf *buf = vb->planes[plane_no].mem_priv;
+
+	return buf->iova;
+}
+
 const struct vb2_mem_ops vb2_dma_sg_memops = {
 	.alloc		= vb2_dma_sg_alloc,
 	.put		= vb2_dma_sg_put,
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
index 52afa0e2bb17..8175c7204ae5 100644
--- a/include/media/videobuf2-dma-sg.h
+++ b/include/media/videobuf2-dma-sg.h
@@ -21,6 +21,9 @@ static inline struct sg_table *vb2_dma_sg_plane_desc(
 	return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
 }
 
+dma_addr_t vb2_dma_sg_plane_dma_addr(struct vb2_buffer *vb,
+				     unsigned int plane_no);
+
 extern const struct vb2_mem_ops vb2_dma_sg_memops;
 
 #endif
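For reference, the same map-with-protection pattern recurs in all three
paths touched above (alloc, userptr and dmabuf attach). Below it is
distilled into one standalone function, assuming the Samsung exynos_iovmm
API used by this tree: iovmm_map() returns the IOVA or a negative error
encoded in the dma_addr_t, hence IS_ERR_VALUE() rather than an IS_ERR()
or NULL check. example_iovmm_map() itself is illustrative.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/exynos_iovmm.h>	/* assumed location of iovmm_map() */
#include <linux/iommu.h>
#include <linux/property.h>
#include <linux/scatterlist.h>

static dma_addr_t example_iovmm_map(struct device *dev, struct sg_table *sgt,
				    size_t size)
{
	int ioprot = IOMMU_READ | IOMMU_WRITE;
	dma_addr_t iova;

	/* allow a cacheable IOMMU mapping only for I/O-coherent devices */
	if (device_get_dma_attr(dev) == DEV_DMA_COHERENT)
		ioprot |= IOMMU_CACHE;

	iova = iovmm_map(dev, sgt->sgl, 0, size, DMA_BIDIRECTIONAL, ioprot);
	if (IS_ERR_VALUE(iova))
		return 0;	/* 0 doubles as "not mapped", as in map_dmabuf */

	return iova;
}

This is also why vb2_dma_sg_attach_dmabuf() initializes buf->iova to 0:
vb2_dma_sg_map_dmabuf() treats 0 or an error value as "not mapped yet"
and creates the iovmm mapping lazily on first map.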