drm/i915/fbc: wait for a vblank instead of 50ms when enabling
author Paulo Zanoni <paulo.r.zanoni@intel.com>
Thu, 21 Jan 2016 20:03:05 +0000 (18:03 -0200)
committer Paulo Zanoni <paulo.r.zanoni@intel.com>
Fri, 29 Jan 2016 20:07:08 +0000 (18:07 -0200)
Instead of waiting for 50ms, just wait until the next vblank, since
it's the minimum requirement. The whole infrastructure of FBC is based
on vblanks, so waiting for X vblanks instead of X milliseconds sounds
like the correct way to go. Besides, 50ms may be less than a vblank on
super slow modes that may or may not exist.

There are some small improvements in PC state residency (due to the
fact that we're now using 16ms for the common modes instead of 50ms),
but the biggest advantage is still the correctness of being
vblank-based instead of time-based.

v2:
  - Rebase after changing the patch order.
  - Update the commit message.
v3:
  - Fix bogus vblank_get() instead of vblank_count() (Ville).
  - Don't forget to call drm_crtc_vblank_{get,put} (Chris, Ville).
  - Adjust the performance details on the commit message.
v4:
  - Don't grab the FBC mutex just to grab the vblank (Maarten)

Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1453406585-10233-1-git-send-email-paulo.r.zanoni@intel.com
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_fbc.c

index 905e90f2595756a1a4c56c6d068abfb3091d8c95..83b629b30c61283e17dfa371f016d9de0c73e901 100644 (file)
@@ -921,9 +921,9 @@ struct i915_fbc {
 
        struct intel_fbc_work {
                bool scheduled;
+               u32 scheduled_vblank;
                struct work_struct work;
                struct drm_framebuffer *fb;
-               unsigned long enable_jiffies;
        } work;
 
        const char *no_fbc_reason;
index a1988a486b9276954db36e8cb482db283a051887..3993b431b02a41e19cbaec3d64225ec748e606c7 100644 (file)
@@ -381,7 +381,17 @@ static void intel_fbc_work_fn(struct work_struct *__work)
                container_of(__work, struct drm_i915_private, fbc.work.work);
        struct intel_fbc_work *work = &dev_priv->fbc.work;
        struct intel_crtc *crtc = dev_priv->fbc.crtc;
-       int delay_ms = 50;
+       struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+
+       if (drm_crtc_vblank_get(&crtc->base)) {
+               DRM_ERROR("vblank not available for FBC on pipe %c\n",
+                         pipe_name(crtc->pipe));
+
+               mutex_lock(&dev_priv->fbc.lock);
+               work->scheduled = false;
+               mutex_unlock(&dev_priv->fbc.lock);
+               return;
+       }
 
 retry:
        /* Delay the actual enabling to let pageflipping cease and the
@@ -390,14 +400,16 @@ retry:
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
-        * A more complicated solution would involve tracking vblanks
-        * following the termination of the page-flipping sequence
-        * and indeed performing the enable as a co-routine and not
-        * waiting synchronously upon the vblank.
-        *
         * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+        *
+        * It is also worth mentioning that since work->scheduled_vblank can be
+        * updated multiple times by the other threads, hitting the timeout is
+        * not an error condition. We'll just end up hitting the "goto retry"
+        * case below.
         */
-       wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+       wait_event_timeout(vblank->queue,
+               drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
+               msecs_to_jiffies(50));
 
        mutex_lock(&dev_priv->fbc.lock);
 
@@ -406,8 +418,7 @@ retry:
                goto out;
 
        /* Were we delayed again while this function was sleeping? */
-       if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
-                      jiffies)) {
+       if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
                mutex_unlock(&dev_priv->fbc.lock);
                goto retry;
        }
@@ -419,6 +430,7 @@ retry:
 
 out:
        mutex_unlock(&dev_priv->fbc.lock);
+       drm_crtc_vblank_put(&crtc->base);
 }
 
 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
@@ -434,13 +446,20 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
 
        WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
+       if (drm_crtc_vblank_get(&crtc->base)) {
+               DRM_ERROR("vblank not available for FBC on pipe %c\n",
+                         pipe_name(crtc->pipe));
+               return;
+       }
+
        /* It is useless to call intel_fbc_cancel_work() in this function since
         * we're not releasing fbc.lock, so it won't have an opportunity to grab
         * it to discover that it was cancelled. So we just update the expected
         * jiffy count. */
        work->fb = crtc->base.primary->fb;
        work->scheduled = true;
-       work->enable_jiffies = jiffies;
+       work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
+       drm_crtc_vblank_put(&crtc->base);
 
        schedule_work(&work->work);
 }