i915: wait for fence in mmio_flip_work_func
author     Alex Goins <agoins@nvidia.com>
           Thu, 26 Nov 2015 02:43:38 +0000 (18:43 -0800)
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
           Thu, 3 Dec 2015 15:10:11 +0000 (16:10 +0100)
If a buffer is backed by a dmabuf, wait on its reservation object's
exclusive fence before flipping.
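
For reference, a minimal sketch of the waiting pattern this patch adds,
assuming the v4.4-era reservation_object API. The helper name and its
standalone form are illustrative only; the real change lives in
intel_mmio_flip_work_func() in the diff below.

    #include <linux/dma-buf.h>
    #include <linux/reservation.h>
    #include <linux/sched.h>

    /*
     * Illustrative helper (not part of the patch): block until the
     * exclusive fence on a dma-buf's reservation object has signaled.
     * wait_all=false waits only on the exclusive fence; intr=false makes
     * the wait uninterruptible, matching the choice described in v6 below.
     */
    static void example_wait_dmabuf_excl_fence(struct dma_buf *dma_buf)
    {
            long ret;

            ret = reservation_object_wait_timeout_rcu(dma_buf->resv,
                                                      false /* wait_all */,
                                                      false /* intr */,
                                                      MAX_SCHEDULE_TIMEOUT);

            /* A negative return means an error, not a timeout. */
            WARN_ON(ret < 0);
    }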

v2: First commit
v3: Remove object_name_lock acquire
v4: Move wait ahead of mark_page_flip_active
    Use crtc->primary->fb to get GEM object instead of pending_flip_obj
    Make use_mmio_flip() return true when an exclusive fence is attached
    Wait only on exclusive fences, interruptible with no timeout
v5: Move wait from do_mmio_flip to mmio_flip_work_func
    Style tweaks to more closely match rest of file
v6: Change back to an uninterruptible wait to match __i915_wait_request, due
    to the inability to properly handle an interrupted wait.
    Warn on an error code from waiting.
v7: No change
v8: Test for !reservation_object_test_signaled_rcu(test_all=FALSE) instead of
    checking obj->base.dma_buf->resv->fence_excl directly

Link: https://patchwork.kernel.org/patch/7704181/
Signed-off-by: Alex Goins <agoins@nvidia.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
drivers/gpu/drm/i915/intel_display.c

index 602e2be1c3d8425eaa5474f3890138e4ff76c1b7..0ad20eccbf62c05cc4e22019f9843a4aab9b6fa5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -44,6 +44,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
@@ -11204,6 +11206,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
                return true;
        else if (i915.enable_execlists)
                return true;
+       else if (obj->base.dma_buf &&
+                !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
+                                                      false))
+               return true;
        else
                return ring != i915_gem_request_get_ring(obj->last_write_req);
 }
@@ -11318,6 +11324,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 {
        struct intel_mmio_flip *mmio_flip =
                container_of(work, struct intel_mmio_flip, work);
+       struct intel_framebuffer *intel_fb =
+               to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
 
        if (mmio_flip->req) {
                WARN_ON(__i915_wait_request(mmio_flip->req,
@@ -11327,6 +11336,12 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
                i915_gem_request_unreference__unlocked(mmio_flip->req);
        }
 
+       /* For framebuffer backed by dmabuf, wait for fence */
+       if (obj->base.dma_buf)
+               WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+                                                           false, false,
+                                                           MAX_SCHEDULE_TIMEOUT) < 0);
+
        intel_do_mmio_flip(mmio_flip);
        kfree(mmio_flip);
 }