drm/udl: add support for exporting a handle to an FD
author     Haixia Shi <hshi@chromium.org>
           Thu, 13 Nov 2014 02:33:53 +0000 (18:33 -0800)
committer  Dave Airlie <airlied@redhat.com>
           Thu, 20 Nov 2014 01:41:37 +0000 (11:41 +1000)
UDL currently supports only importing an FD to a handle, but exporting a
handle to an FD is equally useful. Hook up .prime_handle_to_fd via the
generic drm_gem_prime_handle_to_fd() helper and add a udl-specific
gem_prime_export implementation.
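
For reference, a minimal userspace sketch of the export path this enables
(the helper name and GEM handle are hypothetical; drmPrimeHandleToFD() and
DRM_CLOEXEC are the generic libdrm PRIME interface, not anything
udl-specific):

  #include <stdint.h>
  #include <xf86drm.h>

  /* Export a GEM handle as a dma-buf FD. Without .prime_handle_to_fd
   * the underlying DRM_IOCTL_PRIME_HANDLE_TO_FD fails with -ENOSYS. */
  static int export_bo(int drm_fd, uint32_t handle)
  {
          int prime_fd = -1;

          /* DRM_CLOEXEC: close the exported FD across exec(). */
          if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd))
                  return -1;

          return prime_fd;  /* can be imported by another DRM driver */
  }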

Signed-off-by: Haixia Shi <hshi@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/udl/Makefile
drivers/gpu/drm/udl/udl_dmabuf.c [new file with mode: 0644]
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c

diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 05c7481bfd40d3ff8b49e7ffd6676d5a37eae600..195bcac0b6c8d9e429964d5e942ad1b7a2d9d74d 100644
@@ -1,6 +1,6 @@
 
 ccflags-y := -Iinclude/drm
 
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
 
 obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
new file mode 100644
index 0000000..1d85c3a
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -0,0 +1,273 @@
+/*
+ * udl_dmabuf.c
+ *
+ * Copyright (c) 2014 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+struct udl_drm_dmabuf_attachment {
+       struct sg_table sgt;
+       enum dma_data_direction dir;
+       bool is_mapped;
+};
+
+static int udl_attach_dma_buf(struct dma_buf *dmabuf,
+                             struct device *dev,
+                             struct dma_buf_attachment *attach)
+{
+       struct udl_drm_dmabuf_attachment *udl_attach;
+
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+                       attach->dmabuf->size);
+
+       udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
+       if (!udl_attach)
+               return -ENOMEM;
+
+       udl_attach->dir = DMA_NONE;
+       attach->priv = udl_attach;
+
+       return 0;
+}
+
+static void udl_detach_dma_buf(struct dma_buf *dmabuf,
+                              struct dma_buf_attachment *attach)
+{
+       struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+       struct sg_table *sgt;
+
+       if (!udl_attach)
+               return;
+
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+                       attach->dmabuf->size);
+
+       sgt = &udl_attach->sgt;
+
+       if (udl_attach->dir != DMA_NONE)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+                               udl_attach->dir);
+
+       sg_free_table(sgt);
+       kfree(udl_attach);
+       attach->priv = NULL;
+}
+
+static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
+                                       enum dma_data_direction dir)
+{
+       struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+       struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
+       struct drm_device *dev = obj->base.dev;
+       struct scatterlist *rd, *wr;
+       struct sg_table *sgt = NULL;
+       unsigned int i;
+       int page_count;
+       int nents, ret;
+
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
+                       attach->dmabuf->size, dir);
+
+       /* just return current sgt if already requested. */
+       if (udl_attach->dir == dir && udl_attach->is_mapped)
+               return &udl_attach->sgt;
+
+       if (!obj->pages) {
+               DRM_ERROR("pages is null.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       page_count = obj->base.size / PAGE_SIZE;
+       obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
+       if (!obj->sg) {
+               DRM_ERROR("sg is null.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       sgt = &udl_attach->sgt;
+
+       ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
+       if (ret) {
+               DRM_ERROR("failed to alloc sgt.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       rd = obj->sg->sgl;
+       wr = sgt->sgl;
+       for (i = 0; i < sgt->orig_nents; ++i) {
+               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+               rd = sg_next(rd);
+               wr = sg_next(wr);
+       }
+
+       if (dir != DMA_NONE) {
+               nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+               if (!nents) {
+                       DRM_ERROR("failed to map sgl with iommu.\n");
+                       sg_free_table(sgt);
+                       sgt = ERR_PTR(-EIO);
+                       goto err_unlock;
+               }
+       }
+
+       udl_attach->is_mapped = true;
+       udl_attach->dir = dir;
+       attach->priv = udl_attach;
+
+err_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return sgt;
+}
+
+static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
+                             struct sg_table *sgt,
+                             enum dma_data_direction dir)
+{
+       /* Nothing to do. */
+       DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
+                       attach->dmabuf->size, dir);
+}
+
+static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+       /* TODO */
+
+       return NULL;
+}
+
+static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+                                   unsigned long page_num)
+{
+       /* TODO */
+
+       return NULL;
+}
+
+static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
+                             unsigned long page_num, void *addr)
+{
+       /* TODO */
+}
+
+static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+                                    unsigned long page_num,
+                                    void *addr)
+{
+       /* TODO */
+}
+
+static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
+                          struct vm_area_struct *vma)
+{
+       /* TODO */
+
+       return -EINVAL;
+}
+
+static struct dma_buf_ops udl_dmabuf_ops = {
+       .attach                 = udl_attach_dma_buf,
+       .detach                 = udl_detach_dma_buf,
+       .map_dma_buf            = udl_map_dma_buf,
+       .unmap_dma_buf          = udl_unmap_dma_buf,
+       .kmap                   = udl_dmabuf_kmap,
+       .kmap_atomic            = udl_dmabuf_kmap_atomic,
+       .kunmap                 = udl_dmabuf_kunmap,
+       .kunmap_atomic          = udl_dmabuf_kunmap_atomic,
+       .mmap                   = udl_dmabuf_mmap,
+       .release                = drm_gem_dmabuf_release,
+};
+
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+                                    struct drm_gem_object *obj, int flags)
+{
+       return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+}
+
+static int udl_prime_create(struct drm_device *dev,
+                           size_t size,
+                           struct sg_table *sg,
+                           struct udl_gem_object **obj_p)
+{
+       struct udl_gem_object *obj;
+       int npages;
+
+       npages = size / PAGE_SIZE;
+
+       *obj_p = NULL;
+       obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+       if (!obj)
+               return -ENOMEM;
+
+       obj->sg = sg;
+       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (obj->pages == NULL) {
+               DRM_ERROR("obj pages is NULL %d\n", npages);
+               return -ENOMEM;
+       }
+
+       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+       *obj_p = obj;
+       return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+                               struct dma_buf *dma_buf)
+{
+       struct dma_buf_attachment *attach;
+       struct sg_table *sg;
+       struct udl_gem_object *uobj;
+       int ret;
+
+       /* need to attach */
+       get_device(dev->dev);
+       attach = dma_buf_attach(dma_buf, dev->dev);
+       if (IS_ERR(attach)) {
+               put_device(dev->dev);
+               return ERR_CAST(attach);
+       }
+
+       get_dma_buf(dma_buf);
+
+       sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto fail_detach;
+       }
+
+       ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+       if (ret)
+               goto fail_unmap;
+
+       uobj->base.import_attach = attach;
+       uobj->flags = UDL_BO_WC;
+
+       return &uobj->base;
+
+fail_unmap:
+       dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+       dma_buf_detach(dma_buf, attach);
+       dma_buf_put(dma_buf);
+       put_device(dev->dev);
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8607e9e513db0d94129ea524b9a4abe5dd9653af..d5728ec8525443246aec49b508db2827375242bd 100644
@@ -51,7 +51,9 @@ static struct drm_driver driver = {
        .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &udl_driver_fops,
 
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_export = udl_gem_prime_export,
        .gem_prime_import = udl_gem_prime_import,
 
        .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 308278086c7280090bd7083ac7e7bbb34fe9ec71..1b132d779621d452bd319bf41770dc93f83ed1e2 100644
@@ -124,6 +124,8 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                                            size_t size);
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+                                    struct drm_gem_object *obj, int flags);
 struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf);
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index e00459d38dcbf2370341b1355e343d56fc4437ad..692d6f21f1bc17ffd6114a8d79db7af49199f701 100644
@@ -240,74 +240,3 @@ unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
-
-static int udl_prime_create(struct drm_device *dev,
-                           size_t size,
-                           struct sg_table *sg,
-                           struct udl_gem_object **obj_p)
-{
-       struct udl_gem_object *obj;
-       int npages;
-
-       npages = size / PAGE_SIZE;
-
-       *obj_p = NULL;
-       obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
-       if (!obj)
-               return -ENOMEM;
-
-       obj->sg = sg;
-       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
-       if (obj->pages == NULL) {
-               DRM_ERROR("obj pages is NULL %d\n", npages);
-               return -ENOMEM;
-       }
-
-       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
-       *obj_p = obj;
-       return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-                               struct dma_buf *dma_buf)
-{
-       struct dma_buf_attachment *attach;
-       struct sg_table *sg;
-       struct udl_gem_object *uobj;
-       int ret;
-
-       /* need to attach */
-       get_device(dev->dev);
-       attach = dma_buf_attach(dma_buf, dev->dev);
-       if (IS_ERR(attach)) {
-               put_device(dev->dev);
-               return ERR_CAST(attach);
-       }
-
-       get_dma_buf(dma_buf);
-
-       sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-       if (IS_ERR(sg)) {
-               ret = PTR_ERR(sg);
-               goto fail_detach;
-       }
-
-       ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
-       if (ret) {
-               goto fail_unmap;
-       }
-
-       uobj->base.import_attach = attach;
-       uobj->flags = UDL_BO_WC;
-
-       return &uobj->base;
-
-fail_unmap:
-       dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-       dma_buf_detach(dma_buf, attach);
-       dma_buf_put(dma_buf);
-       put_device(dev->dev);
-       return ERR_PTR(ret);
-}