int offset;
enum dma_data_direction dma_dir;
struct sg_table sg_table;
+ /*
+ * This will point to sg_table when used with the MMAP or USERPTR
+ * memory model, and to the dma_buf sglist when used with the
+ * DMABUF memory model.
+ */
+ struct sg_table *dma_sgt;
size_t size;
unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
struct vm_area_struct *vma;
+
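+	/* non-NULL only when the buffer was imported via attach_dmabuf() */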
+ struct dma_buf_attachment *db_attach;
};
static void vb2_dma_sg_put(void *buf_priv);
buf->size = size;
/* size is already page aligned */
buf->num_pages = size >> PAGE_SHIFT;
+ buf->dma_sgt = &buf->sg_table;
buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
	goto fail_pages_alloc;
- ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, 0, size, GFP_KERNEL);
if (ret)
goto fail_table_alloc;
fail_map:
put_device(buf->dev);
- sg_free_table(sgt);
+ sg_free_table(buf->dma_sgt);
fail_table_alloc:
num_pages = buf->num_pages;
while (num_pages--)
	__free_page(buf->pages[num_pages]);
dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
- sg_free_table(&buf->sg_table);
+ sg_free_table(buf->dma_sgt);
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
static void vb2_dma_sg_prepare(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- struct sg_table *sgt = &buf->sg_table;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (buf->db_attach)
+ return;
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
static void vb2_dma_sg_finish(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- struct sg_table *sgt = &buf->sg_table;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (buf->db_attach)
+ return;
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
buf->dma_dir = dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
+ buf->dma_sgt = &buf->sg_table;
first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
- if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, buf->offset, size, 0))
goto userptr_fail_alloc_table_from_pages;
dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
- sg_free_table(&buf->sg_table);
+ sg_free_table(buf->dma_sgt);
while (--i >= 0) {
	if (buf->dma_dir == DMA_FROM_DEVICE)
		set_page_dirty_lock(buf->pages[i]);
	put_page(buf->pages[i]);
}
BUG_ON(!buf);
- if (!buf->vaddr)
- buf->vaddr = vm_map_ram(buf->pages,
- buf->num_pages,
- -1,
- PAGE_KERNEL);
+ if (!buf->vaddr) {
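+		/* imported buffer: the DMABUF exporter provides the kernel mapping */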
+ if (buf->db_attach)
+ buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+ else
+ buf->vaddr = vm_map_ram(buf->pages,
+ buf->num_pages, -1, PAGE_KERNEL);
+ }
/* add offset in case userptr is not page-aligned */
- return buf->vaddr + buf->offset;
+ return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
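+/*
+ * Import lifecycle: attach_dmabuf() creates the attachment for the
+ * importing device, map_dmabuf() pins the buffer and takes its
+ * sg_table, unmap_dmabuf() drops the kernel mapping and the sg_table,
+ * and detach_dmabuf() releases the attachment and frees the buffer
+ * state.
+ */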
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ buf->dma_sgt = sgt;
+ buf->vaddr = NULL;
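+	/* a kernel mapping, if needed, is created lazily in vb2_dma_sg_vaddr() */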
+
+ return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
+ return;
+ }
+
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
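+	/* release the exporter's kernel mapping before unmapping the sglist */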
+ if (buf->vaddr) {
+ dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ buf->vaddr = NULL;
+ }
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_sgt))
+ vb2_dma_sg_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
+ struct vb2_dma_sg_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+	/* create an attachment for the dmabuf with the importing device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = dma_dir;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
static void *vb2_dma_sg_cookie(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- return &buf->sg_table;
+ return buf->dma_sgt;
}
const struct vb2_mem_ops vb2_dma_sg_memops = {
.vaddr = vb2_dma_sg_vaddr,
.mmap = vb2_dma_sg_mmap,
.num_users = vb2_dma_sg_num_users,
+ .map_dmabuf = vb2_dma_sg_map_dmabuf,
+ .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
+ .attach_dmabuf = vb2_dma_sg_attach_dmabuf,
+ .detach_dmabuf = vb2_dma_sg_detach_dmabuf,
.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
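For context, a capture driver opts in to the new import path by advertising VB2_DMABUF and selecting these memops; vb2 then calls the attach/map callbacks above whenever userspace queues a V4L2_MEMORY_DMABUF buffer. A minimal sketch, not part of the patch (my_dev, my_vb2_ops and my_queue_init are hypothetical driver names):

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

static int my_queue_init(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* VB2_DMABUF enables the attach/map callbacks added above */
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = dev;
	q->ops = &my_vb2_ops;		/* the driver's vb2_ops, hypothetical */
	q->mem_ops = &vb2_dma_sg_memops;
	q->buf_struct_size = sizeof(struct vb2_buffer);

	return vb2_queue_init(q);
}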