drm/i915: Finish any pending operations on the framebuffer before disabling
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 3 Apr 2012 16:58:35 +0000 (17:58 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 9 Apr 2012 19:53:30 +0000 (21:53 +0200)
Similar to the case where we are changing from one framebuffer to
another, we need to be sure that there are no pending WAIT_FOR_EVENTs on
the pipe for the current framebuffer before switching. If we disable the
pipe, and then try to execute a WAIT_FOR_EVENT it will block
indefinitely and cause a GPU hang.
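
Concretely, the fix enforces one ordering rule in the disable path: retire
everything still outstanding on the current scanout, then switch the pipe
off. Below is a condensed sketch of that ordering, lifted from the
intel_crtc_disable() hunk in the diff further down (the sketch's function
name is illustrative; the assertions and the long locking comment are
trimmed):

static void crtc_disable_ordering_sketch(struct drm_crtc *crtc)
{
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        struct drm_device *dev = crtc->dev;

        /* Retire pending flips and any MI_WAIT_FOR_EVENT batches that
         * reference the current scanout before the pipe goes away.
         */
        if (crtc->fb) {
                mutex_lock(&dev->struct_mutex);
                intel_finish_fb(crtc->fb);
                mutex_unlock(&dev->struct_mutex);
        }

        /* Only once the scanout is idle is it safe to turn the pipe off. */
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}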

We attempted to fix this in commit 85345517fe6d4de27b0d6ca19fef9d28ac947c4a
(drm/i915: Retire any pending operations on the old scanout when switching)
for the case of mode switching, but that still leaves the path where we
switch the pipe off vulnerable.

There still remains a race condition where a display may be unplugged,
then switched off by the core, a uevent sent to notify the DDX, and the
DDX may issue a WAIT_FOR_EVENT before it processes the uevent. This window
does not exist if the pipe is only switched off in response to the
uevent. Time to make sure that is so...
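
For illustration only, the userspace side of that contract looks roughly
like the sketch below. Everything in it (struct ddx_output,
ddx_flush_pending_batches(), ddx_set_dpms(), DDX_DPMS_OFF) is a
hypothetical placeholder, not a real DDX or kernel API:

/* Hypothetical DDX-side hotplug handling; all names are placeholders. */
static void ddx_handle_unplug_uevent(struct ddx_output *output)
{
        /* Stop queuing new batches that target the unplugged output and
         * wait for anything already submitted, including batches that
         * contain a WAIT_FOR_EVENT on its pipe, to complete.
         */
        ddx_flush_pending_batches(output);

        /* Only then ask the kernel to switch the pipe off, so the kernel
         * never disables a pipe with waits still outstanding.
         */
        ddx_set_dpms(output, DDX_DPMS_OFF);
}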

Reported-by: Francis Leblanc <Francis.Leblanc-Lebeau@verint.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=36515
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45413
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
[danvet: fixup spelling in comment, noticed by Eugeni.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/intel_display.c

index 91b35fd1db8c81ee06984de0aaae88824929cddd..f446e66cbdaf76cb1af5325e0a22e2dcf7cba9d6 100644
@@ -2244,6 +2244,33 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return 0;
 }
 
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+       struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       bool was_interruptible = dev_priv->mm.interruptible;
+       int ret;
+
+       wait_event(dev_priv->pending_flip_queue,
+                  atomic_read(&dev_priv->mm.wedged) ||
+                  atomic_read(&obj->pending_flip) == 0);
+
+       /* Big Hammer, we also need to ensure that any pending
+        * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+        * current scanout is retired before unpinning the old
+        * framebuffer.
+        *
+        * This should only fail upon a hung GPU, in which case we
+        * can safely continue.
+        */
+       dev_priv->mm.interruptible = false;
+       ret = i915_gem_object_finish_gpu(obj);
+       dev_priv->mm.interruptible = was_interruptible;
+
+       return ret;
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *old_fb)
@@ -2282,25 +2309,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       if (old_fb) {
-               struct drm_i915_private *dev_priv = dev->dev_private;
-               struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-
-               wait_event(dev_priv->pending_flip_queue,
-                          atomic_read(&dev_priv->mm.wedged) ||
-                          atomic_read(&obj->pending_flip) == 0);
-
-               /* Big Hammer, we also need to ensure that any pending
-                * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-                * current scanout is retired before unpinning the old
-                * framebuffer.
-                *
-                * This should only fail upon a hung GPU, in which case we
-                * can safely continue.
-                */
-               ret = i915_gem_object_finish_gpu(obj);
-               (void) ret;
-       }
+       if (old_fb)
+               intel_finish_fb(old_fb);
 
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
                                         LEAVE_ATOMIC_MODE_SET);
@@ -3371,6 +3381,23 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        struct drm_device *dev = crtc->dev;
 
+       /* Flush any pending WAITs before we disable the pipe. Note that
+        * we need to drop the struct_mutex in order to acquire it again
+        * during the lowlevel dpms routines around a couple of the
+        * operations. It does not look trivial nor desirable to move
+        * that locking higher. So instead we leave a window for the
+        * submission of further commands on the fb before we can actually
+        * disable it. This race with userspace exists anyway, and we can
+        * only rely on the pipe being disabled by userspace after it
+        * receives the hotplug notification and has flushed any pending
+        * batches.
+        */
+       if (crtc->fb) {
+               mutex_lock(&dev->struct_mutex);
+               intel_finish_fb(crtc->fb);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
        assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
        assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);