drm/i915: Export our request as a dma-buf fence on the reservation object
author		Chris Wilson <chris@chris-wilson.co.uk>
		Thu, 4 Aug 2016 15:32:42 +0000 (16:32 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
		Thu, 4 Aug 2016 19:20:06 +0000 (20:20 +0100)
If the GEM objects used for rendering in this request have been
exported via dma-buf to a third party, hook ourselves into the dma-buf
reservation object so that the third party can serialise with our
rendering via the dma-buf fences.

Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-26-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
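
For context, the importer side this enables: a third party attached to the
dma-buf can now serialise against our rendering by waiting on the fences
published on dma_buf->resv. A minimal sketch, assuming the reservation_object
wait helpers from <linux/reservation.h> of this kernel;
third_party_sync_to_i915() is an invented name, not an API from this patch:

    #include <linux/dma-buf.h>
    #include <linux/reservation.h>
    #include <linux/sched.h>

    /* Invented helper for illustration: block until i915's rendering to
     * the shared buffer completes. A writer must wait for all fences
     * (shared readers plus the exclusive writer); a reader only needs
     * the exclusive (write) fence to signal.
     */
    static long third_party_sync_to_i915(struct dma_buf *dma_buf, bool write)
    {
            return reservation_object_wait_timeout_rcu(dma_buf->resv,
                                                       write, true,
                                                       MAX_SCHEDULE_TIMEOUT);
    }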

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 3a00ab3ad06e54f37a79d8a95b1bb3244042f541..c60a8d5bbad04466ba5644a7147317b68e52f597 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
  * Authors:
  *     Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+                         struct dma_buf *dma_buf)
+{
+       struct reservation_object *resv = dma_buf->resv;
+       struct drm_i915_gem_request *req;
+       unsigned long active;
+       int idx;
+
+       active = __I915_BO_ACTIVE(obj);
+       if (!active)
+               return;
+
+       /* Serialise with execbuf to prevent concurrent fence-loops */
+       mutex_lock(&obj->base.dev->struct_mutex);
+
+       /* Mark the object for future fences before racily adding old fences */
+       obj->base.dma_buf = dma_buf;
+
+       ww_mutex_lock(&resv->lock, NULL);
+
+       for_each_active(active, idx) {
+               req = i915_gem_active_get(&obj->last_read[idx],
+                                         &obj->base.dev->struct_mutex);
+               if (!req)
+                       continue;
+
+               if (reservation_object_reserve_shared(resv) == 0)
+                       reservation_object_add_shared_fence(resv, &req->fence);
+
+               i915_gem_request_put(req);
+       }
+
+       req = i915_gem_active_get(&obj->last_write,
+                                 &obj->base.dev->struct_mutex);
+       if (req) {
+               reservation_object_add_excl_fence(resv, &req->fence);
+               i915_gem_request_put(req);
+       }
+
+       ww_mutex_unlock(&resv->lock);
+       mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+       struct dma_buf *dma_buf;
 
        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
 
-
        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }
 
-       return dma_buf_export(&exp_info);
+       dma_buf = dma_buf_export(&exp_info);
+       if (IS_ERR(dma_buf))
+               return dma_buf;
+
+       export_fences(obj, dma_buf);
+       return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
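
The execbuffer hunk below consumes these fences through
i915_gem_object_get_dmabuf_resv() (hence its new i915_gem_dmabuf.h include).
A sketch of that helper as it stands at this point in the series (paraphrased,
so treat the exact body as illustrative): it resolves the reservation object
for both directions of sharing, which is why export_fences() above marks
obj->base.dma_buf before racily adding the old fences.

    static inline struct reservation_object *
    i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
    {
            struct dma_buf *dma_buf;

            if (obj->base.dma_buf)                  /* we exported the object */
                    dma_buf = obj->base.dma_buf;
            else if (obj->base.import_attach)       /* we imported it */
                    dma_buf = obj->base.import_attach->dmabuf;
            else
                    return NULL;

            return dma_buf->resv;
    }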
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a1da3028a949a26533fc2c5874b9452a896ee533..71834741bd87e08f5ab4ac309b43c37160f89667 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define  __EXEC_OBJECT_HAS_PIN         (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE       (1<<30)
@@ -1205,6 +1209,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+                           struct drm_i915_gem_request *req,
+                           unsigned int flags)
+{
+       struct reservation_object *resv;
+
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (!resv)
+               return;
+
+       /* Ignore errors from failing to allocate the new fence, we can't
+        * handle an error right now. Worst case should be missed
+        * synchronisation leading to rendering corruption.
+        */
+       ww_mutex_lock(&resv->lock, NULL);
+       if (flags & EXEC_OBJECT_WRITE)
+               reservation_object_add_excl_fence(resv, &req->fence);
+       else if (reservation_object_reserve_shared(resv) == 0)
+               reservation_object_add_shared_fence(resv, &req->fence);
+       ww_mutex_unlock(&resv->lock);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct drm_i915_gem_request *req)
@@ -1224,6 +1250,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                obj->base.read_domains = obj->base.pending_read_domains;
 
                i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+               eb_export_fence(obj, req, vma->exec_entry->flags);
                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
 }
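
The userspace-visible effect exercised by igt/prime_busy: with requests
exported as fences, poll() on the dma-buf file descriptor now reflects GPU
busyness, because dma_buf_poll() waits on the fences in dma_buf->resv
(POLLIN on the exclusive fence, POLLOUT on all fences). A minimal sketch of
such a check (dmabuf_busy() is an invented name, not code from the testcase):

    #include <poll.h>
    #include <stdbool.h>

    /* Returns true while the GPU is still using the buffer: a zero
     * timeout turns poll() into a non-blocking busyness query, and
     * poll() returning 0 means the requested fences have not yet
     * signalled.
     */
    static bool dmabuf_busy(int dmabuf_fd, short events)
    {
            struct pollfd pfd = { .fd = dmabuf_fd, .events = events };

            return poll(&pfd, 1, 0) == 0;
    }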