drm/i915: Create a more generic pm handler for hsw+
author      Ben Widawsky <ben@bwidawsk.net>         Wed, 29 May 2013 02:22:24 +0000 (19:22 -0700)
committer   Daniel Vetter <daniel.vetter@ffwll.ch>  Fri, 31 May 2013 18:54:15 +0000 (20:54 +0200)
HSW has some special requirements for the VEBOX. Splitting out the
interrupt handler will make the code a bit nicer and less error prone
when we begin to handle those.

The slight functional change in this patch (queueing work while holding
the spinlock) is intentional, as it makes a subsequent patch a bit
nicer. The change should also only affect HSW platforms.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
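
For context, the "slight functional change" called out above is the ordering of
queue_work() relative to rps.lock. The fragment below is a sketch of the legacy
ordering, paraphrased from the pre-patch gen6_queue_rps_work(), and is not part
of this diff; compare it with hsw_pm_irq_handler() in the hunk below, which
calls queue_work() before dropping the lock.

	/* Legacy ordering (sketch, paraphrased from the pre-patch
	 * gen6_queue_rps_work()): the work item is queued only after
	 * rps.lock has been released.
	 */
	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);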
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 338e726f4ef53ec731f517d5000ca3018a4f9d12..0f052f90f39a97f25f74741e89675a98989b17d5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -842,6 +842,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                ivybridge_handle_parity_error(dev);
 }
 
+/* Legacy way of handling PM interrupts */
 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
 {
@@ -921,6 +922,31 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which for
+ * posterity is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+                              u32 pm_iir)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->rps.lock, flags);
+       dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS;
+       if (dev_priv->rps.pm_iir) {
+               I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+               /* never want to mask useful interrupts. (also posting read) */
+               WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
+               /* TODO: if queue_work is slow, move it out of the spinlock */
+               queue_work(dev_priv->wq, &dev_priv->rps.work);
+       }
+       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+       if (pm_iir & ~GEN6_PM_DEFERRED_EVENTS)
+               DRM_ERROR("Unexpected PM interrupt\n");
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -1231,7 +1257,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
-               if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               if (IS_HASWELL(dev))
+                       hsw_pm_irq_handler(dev_priv, pm_iir);
+               else if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
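
The masking the new comment alludes to is resolved by the rps work item: the
interrupt path above masks the deferred bits in GEN6_PMIMR and accumulates them
in rps.pm_iir, and the worker later unmasks them once it is out of interrupt
context and can take forcewake to reprogram the frequency. A rough sketch of
that consumer side follows; it is paraphrased from the gen6_pm_rps_work() of
this era, and the function name and the exact unmask expression are
illustrative assumptions, not part of this patch.

/* Sketch of the deferred-work consumer (assumption, modelled on
 * gen6_pm_rps_work() as it existed around this patch). */
static void rps_work_sketch(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Unmask the deferred RPS bits again; other PMIMR users must be
	 * left untouched (cf. the WARN_ON in hsw_pm_irq_handler()). */
	I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if (!(pm_iir & GEN6_PM_DEFERRED_EVENTS))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;
	/* Clamping to the min/max delay is omitted for brevity. */
	gen6_set_rps(dev_priv->dev, new_delay);
	mutex_unlock(&dev_priv->rps.hw_lock);
}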