 	struct sg_table *sgt = buf->dma_sgt;
 	if (sgt) {
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+		/*
+		 * No need to sync to CPU, it's already synced to the CPU
+		 * since the finish() memop will have been called before this.
+		 */
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				   buf->dma_dir, &attrs);
 		if (!vma_is_io(buf->vma))
 			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
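For context: the CPU-side sync this unmap now skips is performed by the allocator's finish() memop in videobuf2-dma-contig.c. A sketch of vb2_dc_finish() as it looks around this kernel version (the exact guard is from memory and may differ slightly):

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}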
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 	/* Only cache aligned DMA transfers are reliable */
 	if (!IS_ALIGNED(vaddr | size, dma_align)) {
 	kfree(pages);
 	pages = NULL;
-	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
-				buf->dma_dir);
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, &attrs);
 	if (sgt->nents <= 0) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
 	return buf;
 fail_map_sg:
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+			   buf->dma_dir, &attrs);
 fail_sgt_init:
 	if (!vma_is_io(buf->vma))
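The device-side sync skipped at map time is done by the prepare() memop just before the buffer is handed to the hardware, so with DMA_ATTR_SKIP_CPU_SYNC the cache is flushed once rather than twice per buffer cycle. A sketch of vb2_dc_prepare() for reference (guard details may differ by version):

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}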
 	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 	if (WARN_ON(alloc_ctx == NULL))
 		return NULL;
 	buf->dev = get_device(conf->dev);
 	sgt = &buf->sg_table;
-	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
 		goto fail_map;
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dma_sg_put;
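videobuf2-dma-sg.c relies on the same pairing: prepare() syncs the scatterlist to the device before DMA and finish() syncs it back to the CPU afterwards, which is why the map/unmap paths themselves can skip the sync. Rough sketches of the two memops (shapes as recalled from this era of the file; the db_attach guard is an assumption):

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}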
 	int i = buf->num_pages;
 	if (atomic_dec_and_test(&buf->refcount)) {
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+				   buf->dma_dir, &attrs);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
 	int num_pages_from_user;
 	struct vm_area_struct *vma;
 	struct sg_table *sgt;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		goto userptr_fail_alloc_table_from_pages;
 	sgt = &buf->sg_table;
-	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
 		goto userptr_fail_map;
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	return buf;
 userptr_fail_map:
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
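A portability note: the DEFINE_DMA_ATTRS()/dma_set_attr() pattern used throughout these hunks exists only on kernels predating the commit "dma-mapping: use unsigned long for dma_attrs" (merged in v4.8). On later kernels the attributes are a plain bitmask, so the equivalent of the calls above would simply pass the flag directly:

	/* post-v4.8 equivalent: attrs is an unsigned long bitmask */
	dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir,
			 DMA_ATTR_SKIP_CPU_SYNC);

	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir,
			   DMA_ATTR_SKIP_CPU_SYNC);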