From: Chris Wilson
Date: Mon, 11 Jul 2016 13:08:07 +0000 (+0100)
Subject: drm/vgem: Enable dmabuf interface for export
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e6f15b763ab2;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

drm/vgem: Enable dmabuf interface for export

Enable the standard GEM dma-buf interface provided by the DRM core, but
only for exporting the VGEM object. This allows passing around the VGEM
objects created from the dumb interface and using them as sources
elsewhere. Creating a VGEM object for a foreign handle is not supported.

v2: With additional completeness.
v3: Need to clear the CPU cache upon exporting the dma-addresses.
v4: Use drm_gem_put_pages() as well.
v5: Use drm_prime_pages_to_sg()

Testcase: igt/vgem_basic/dmabuf-*
Testcase: igt/prime_vgem
Signed-off-by: Chris Wilson
Cc: Sean Paul
Cc: Zach Reizner
Acked-by: Zach Reizner
Reviewed-by: Matthew Auld
Signed-off-by: Daniel Vetter
Link: http://patchwork.freedesktop.org/patch/msgid/1468242488-1505-3-git-send-email-chris@chris-wilson.co.uk
---

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c161b6d7e427..b5fb968d2d5c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -192,14 +192,101 @@ static const struct file_operations vgem_driver_fops = {
         .release = drm_release,
 };
 
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+        long n_pages = obj->size >> PAGE_SHIFT;
+        struct page **pages;
+
+        /* Flush the object from the CPU cache so that importers can rely
+         * on coherent indirect access via the exported dma-address.
+         */
+        pages = drm_gem_get_pages(obj);
+        if (IS_ERR(pages))
+                return PTR_ERR(pages);
+
+        drm_clflush_pages(pages, n_pages);
+        drm_gem_put_pages(obj, pages, true, false);
+
+        return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+        struct sg_table *st;
+        struct page **pages;
+
+        pages = drm_gem_get_pages(obj);
+        if (IS_ERR(pages))
+                return ERR_CAST(pages);
+
+        st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+        drm_gem_put_pages(obj, pages, false, false);
+
+        return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+        long n_pages = obj->size >> PAGE_SHIFT;
+        struct page **pages;
+        void *addr;
+
+        pages = drm_gem_get_pages(obj);
+        if (IS_ERR(pages))
+                return NULL;
+
+        addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
+        drm_gem_put_pages(obj, pages, false, false);
+
+        return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+        vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+                           struct vm_area_struct *vma)
+{
+        int ret;
+
+        if (obj->size < vma->vm_end - vma->vm_start)
+                return -EINVAL;
+
+        if (!obj->filp)
+                return -ENODEV;
+
+        ret = obj->filp->f_op->mmap(obj->filp, vma);
+        if (ret)
+                return ret;
+
+        fput(vma->vm_file);
+        vma->vm_file = get_file(obj->filp);
+        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+        return 0;
+}
+
 static struct drm_driver vgem_driver = {
-        .driver_features             = DRIVER_GEM,
+        .driver_features             = DRIVER_GEM | DRIVER_PRIME,
         .gem_free_object_unlocked    = vgem_gem_free_object,
         .gem_vm_ops                  = &vgem_gem_vm_ops,
         .ioctls                      = vgem_ioctls,
         .fops                        = &vgem_driver_fops,
+
         .dumb_create                 = vgem_gem_dumb_create,
         .dumb_map_offset             = vgem_gem_dumb_map,
+
+        .prime_handle_to_fd          = drm_gem_prime_handle_to_fd,
+        .gem_prime_pin               = vgem_prime_pin,
+        .gem_prime_export            = drm_gem_prime_export,
+        .gem_prime_get_sg_table      = vgem_prime_get_sg_table,
+        .gem_prime_vmap              = vgem_prime_vmap,
+        .gem_prime_vunmap            = vgem_prime_vunmap,
+        .gem_prime_mmap              = vgem_prime_mmap,
+
         .name = DRIVER_NAME,
         .desc = DRIVER_DESC,
         .date = DRIVER_DATE,
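
For reference, below is a minimal userspace sketch (not part of the patch) of the path this change opens up: allocate a dumb buffer on the vgem node and export the resulting GEM handle as a dma-buf fd with DRM_IOCTL_PRIME_HANDLE_TO_FD. It assumes /dev/dri/card0 happens to be the vgem device, that the uapi DRM headers (or libdrm's copies) are on the include path, and an arbitrary 1024x1024 32bpp buffer size.

/* Hypothetical example, not part of this patch: export a vgem dumb buffer. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>    /* may be <libdrm/drm.h> depending on the distro */

int main(void)
{
        struct drm_mode_create_dumb create;
        struct drm_prime_handle prime;
        int fd;

        fd = open("/dev/dri/card0", O_RDWR);    /* assumed to be the vgem node */
        if (fd < 0)
                return 1;

        /* Create a dumb buffer through the vgem dumb interface. */
        memset(&create, 0, sizeof(create));
        create.width = 1024;
        create.height = 1024;
        create.bpp = 32;
        if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create)) {
                close(fd);
                return 1;
        }

        /* Export the GEM handle as a dma-buf fd; this is what DRIVER_PRIME
         * plus .prime_handle_to_fd in the patch above enables. */
        memset(&prime, 0, sizeof(prime));
        prime.handle = create.handle;
        prime.flags = DRM_CLOEXEC;
        if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime)) {
                close(fd);
                return 1;
        }

        printf("exported dma-buf fd %d\n", prime.fd);

        close(prime.fd);
        close(fd);
        return 0;
}

The igt tests named in the commit message (igt/vgem_basic/dmabuf-* and igt/prime_vgem) exercise this export path, including mapping of the exported dma-buf, far more thoroughly.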