* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
- buf->dma_dir, &attrs) == 0)
+ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+ if (!sgt->nents)
goto fail_map;
buf->handler.refcount = &buf->refcount;
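
The hunk above is the core of the fix: dma_map_sg_attrs() must be passed the number of entries allocated in the table (sgt->orig_nents), and its return value, the number of DMA segments actually produced (an IOMMU may coalesce adjacent entries), belongs in sgt->nents. A minimal sketch of that contract follows; the function names are invented for illustration, and a populated sg_table plus a valid struct device are assumed:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map(struct device *dev, struct sg_table *sgt,
		       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/* pass the allocated entry count in, store the mapped count */
	sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (!sgt->nents)
		return -EIO;	/* dma_map_sg() returns 0 on failure */

	/* hardware descriptors iterate over the *mapped* segments */
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		pr_debug("seg %d: %pad + %u\n", i,
			 &sg_dma_address(sg), sg_dma_len(sg));
	return 0;
}

static void example_unmap(struct device *dev, struct sg_table *sgt,
			  enum dma_data_direction dir)
{
	/* unmap takes the count passed *in* to dma_map_sg(), not nents */
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
}

The asymmetry is deliberate: per the DMA-API documentation, dma_unmap_sg() must be called with the same count that was handed to dma_map_sg(), which is why every unmap site in this patch switches to orig_nents.
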
dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
buf->dma_dir, &attrs);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
- buf->dma_dir, &attrs) == 0)
+ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+ if (!sgt->nents)
goto userptr_fail_map;
+
return buf;
userptr_fail_map:

dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
+ &attrs);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
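
Both release paths above (the userptr_fail_map unwind and the "Releasing userspace buffer" put path) tear down in the reverse order of setup, and the fix makes the unmap symmetric with the map. A condensed sketch, with an invented example_buf standing in for struct vb2_dma_sg_buf:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct example_buf {			/* stand-in, not the vb2 struct */
	struct device *dev;
	void *vaddr;
	unsigned int num_pages;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
};

static void example_release(struct example_buf *buf)
{
	struct sg_table *sgt = &buf->sg_table;

	/* undo dma_map_sg(): same sgl, same orig_nents, same direction */
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

	/* then drop the kernel virtual mapping, if one was created */
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);

	/* finally free the scatterlist entries themselves */
	sg_free_table(sgt);
}
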
/* stealing dmabuf mutex to serialize map/unmap operations */
struct mutex *lock = &db_attach->dmabuf->lock;
struct sg_table *sgt;
- int ret;
mutex_lock(lock);
}
/* mapping to the client with new direction */
- ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
- if (ret <= 0) {
+ sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ dma_dir);
+ if (!sgt->nents) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
return ERR_PTR(-EIO);
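
The dmabuf map op caches one mapping per attachment and only remaps when the importer asks for a different direction; with the local ret variable gone, a return of zero mapped entries is the sole failure signal and is translated to ERR_PTR(-EIO). A condensed sketch of that pattern (illustrative names, the mutex serialization elided):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_attach {			/* stand-in attachment state */
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static struct sg_table *example_remap(struct device *dev,
				      struct example_attach *attach,
				      enum dma_data_direction dma_dir)
{
	struct sg_table *sgt = &attach->sgt;

	/* return the previously mapped table if the direction matches */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* otherwise release the stale mapping first */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* remap with the new direction; zero mapped entries means failure */
	sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (!sgt->nents)
		return ERR_PTR(-EIO);

	attach->dma_dir = dma_dir;
	return sgt;
}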