#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/exynos_iovmm.h>
+
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
struct vb2_vmarea_handler handler;
struct dma_buf_attachment *db_attach;
+ /*
+ * Our I/O address space is not managed by dma-mapping, so the I/O
+ * address returned by iovmm_map() must not overwrite
+ * scatterlist.dma_address, which the cache-maintenance code still uses.
+ */
+ dma_addr_t iova;
};
static void vb2_dma_sg_put(void *buf_priv);
struct sg_table *sgt;
int ret;
int num_pages;
+ int ioprot = IOMMU_READ | IOMMU_WRITE;
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
buf->dev = get_device(dev);
sgt = &buf->sg_table;
- /*
- * No need to sync to the device, this will happen later when the
- * prepare() memop is called.
- */
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents)
+
+ if (device_get_dma_attr(dev) == DEV_DMA_COHERENT)
+ ioprot |= IOMMU_CACHE;
+
+ buf->iova = iovmm_map(buf->dev, sgt->sgl, 0, size,
+ DMA_BIDIRECTIONAL, ioprot);
+ if (IS_ERR_VALUE(buf->iova))
goto fail_map;
buf->handler.refcount = &buf->refcount;
static void vb2_dma_sg_put(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
if (refcount_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ iovmm_unmap(buf->dev, buf->iova);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
struct frame_vector *vec;
+ struct scatterlist *s;
+ int i;
+ int ioprot = IOMMU_READ | IOMMU_WRITE;
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
goto userptr_fail_sgtable;
sgt = &buf->sg_table;
- /*
- * No need to sync to the device, this will happen later when the
- * prepare() memop is called.
- */
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents)
+
+ /* Fix up the scatter-gather list, which dma-mapping did not initialize. */
+ sgt->nents = sgt->orig_nents;
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ s->dma_address = sg_phys(s);
+
+ if (device_get_dma_attr(dev) == DEV_DMA_COHERENT)
+ ioprot |= IOMMU_CACHE;
+
+ buf->iova = iovmm_map(buf->dev, sgt->sgl, 0, size,
+ DMA_BIDIRECTIONAL, ioprot);
+ if (IS_ERR_VALUE(buf->iova))
goto userptr_fail_map;
return buf;
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
- DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
{
struct vb2_dma_sg_buf *buf = mem_priv;
struct sg_table *sgt;
+ int ioprot = IOMMU_READ | IOMMU_WRITE;
if (WARN_ON(!buf->db_attach)) {
pr_err("trying to pin a non attached buffer\n");
return -EINVAL;
}
+ if ((buf->iova == 0) || IS_ERR_VALUE(buf->iova)) {
+ if (device_get_dma_attr(buf->dev) == DEV_DMA_COHERENT)
+ ioprot |= IOMMU_CACHE;
+
+ buf->iova = iovmm_map(buf->dev, sgt->sgl, 0, buf->size,
+ DMA_BIDIRECTIONAL, ioprot);
+ if (IS_ERR_VALUE(buf->iova)) {
+ dma_buf_unmap_attachment(buf->db_attach,
+ sgt, buf->dma_dir);
+ pr_err("Error from iovmm_map()=%pad\n", &buf->iova);
+ return (int)buf->iova;
+ }
+ }
+
buf->dma_sgt = sgt;
buf->vaddr = NULL;
if (WARN_ON(buf->dma_sgt))
vb2_dma_sg_unmap_dmabuf(buf);
+ iovmm_unmap(buf->dev, buf->iova);
+
/* detach this attachment */
dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
kfree(buf);
return dba;
}
+ buf->iova = 0;
buf->dma_dir = dma_dir;
buf->size = size;
buf->db_attach = dba;
return buf->dma_sgt;
}
+/*
+ * vb2_dma_sg_plane_dma_addr - device (IOVMM) address of a plane's buffer
+ * @vb: vb2 buffer owning the plane
+ * @plane_no: zero-based plane index
+ *
+ * Returns buf->iova, the address previously obtained from iovmm_map()
+ * (in alloc/get_userptr/map_dmabuf). NOTE(review): this is the IOVMM
+ * mapping, not a dma-mapping handle — scatterlist.dma_address is kept
+ * separate on purpose (see struct vb2_dma_sg_buf).
+ */
+dma_addr_t vb2_dma_sg_plane_dma_addr(struct vb2_buffer *vb,
+ unsigned int plane_no)
+{
+ struct vb2_dma_sg_buf *buf = vb->planes[plane_no].mem_priv;
+
+ return buf->iova;
+}
+
const struct vb2_mem_ops vb2_dma_sg_memops = {
.alloc = vb2_dma_sg_alloc,
.put = vb2_dma_sg_put,