drm/i915: Make mmio flip wait for seqno in the work function
author    Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Thu, 6 Nov 2014 09:03:40 +0000 (11:03 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 7 Nov 2014 17:42:20 +0000 (18:42 +0100)
This simplifies the code quite a bit compared to iterating over all the
crtcs from the ring interrupt handler to find flips waiting on that
ring's seqno.
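
Roughly, the flip work function now looks like this (condensed from the
intel_display.c hunk below; the comments are added here for illustration
and are not part of the patch):

	static void intel_mmio_flip_work_func(struct work_struct *work)
	{
		struct intel_crtc *intel_crtc =
			container_of(work, struct intel_crtc, mmio_flip.work);
		struct intel_engine_cs *ring = intel_crtc->mmio_flip.ring;
		uint32_t seqno = intel_crtc->mmio_flip.seqno;

		/* Sleep in process context until rendering to the flip's
		 * framebuffer has completed. The wait is uninterruptible (v3)
		 * and is done without holding dev->struct_mutex (v2). */
		if (seqno)
			WARN_ON(__i915_wait_seqno(ring, seqno,
						  intel_crtc->reset_counter,
						  false, NULL, NULL) != 0);

		/* Rendering is done, write the new base address via mmio. */
		intel_do_mmio_flip(intel_crtc);
	}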

Also, it allows us to drop the mmio_flip spinlock, since the mmio_flip
struct is now accessed in only two places: when the flip is queued and
in the work function that performs the mmio writes. Since a new flip
cannot be queued while the previous one is still pending, the two paths
should never run in parallel. We might need to revisit that if support
for replacing pending flips is ever implemented, though.
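
For reference, the queueing side, the only other place that touches the
mmio_flip struct, reduces to the following (again condensed from the
diff below, with illustrative comments added):

	static int intel_queue_mmio_flip(struct drm_device *dev,
					 struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 struct drm_i915_gem_object *obj,
					 struct intel_engine_cs *ring,
					 uint32_t flags)
	{
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		/* Record what the work function has to wait for. The work
		 * only reads these fields after schedule_work(), and no new
		 * flip can be queued while this one is pending, so no lock
		 * is needed. */
		intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
		intel_crtc->mmio_flip.ring = obj->ring;

		schedule_work(&intel_crtc->mmio_flip.work);

		return 0;
	}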

v2: Don't hold dev->struct_mutex while waiting (Chris)

v3: Make the wait uninterruptible (Chris)

Signed-off-by: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 318a6a0724d041c5a6d80fda6b6faf1724595f68..5fff2870a17b1e35ec9e47871323de43ff5dc648 100644
@@ -979,9 +979,6 @@ static void notify_ring(struct drm_device *dev,
 
        trace_i915_gem_request_complete(ring);
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               intel_notify_mmio_flip(ring);
-
        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 18ad2bc8550b241624e611a68ab22f8ee6622410..cb0f32fd3b5c4c23ee67828a3e1ba7da7109fbdc 100644
@@ -9417,73 +9417,24 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 
        if (atomic_update)
                intel_pipe_update_end(intel_crtc, start_vbl_count);
-
-       spin_lock_irq(&dev_priv->mmio_flip_lock);
-       intel_crtc->mmio_flip.status = INTEL_MMIO_FLIP_IDLE;
-       spin_unlock_irq(&dev_priv->mmio_flip_lock);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
 {
        struct intel_crtc *intel_crtc =
                container_of(work, struct intel_crtc, mmio_flip.work);
-
-       intel_do_mmio_flip(intel_crtc);
-}
-
-static int intel_postpone_flip(struct drm_i915_gem_object *obj)
-{
        struct intel_engine_cs *ring;
-       int ret;
-
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-       if (!obj->last_write_seqno)
-               return 0;
-
-       ring = obj->ring;
-
-       if (i915_seqno_passed(ring->get_seqno(ring, true),
-                             obj->last_write_seqno))
-               return 0;
-
-       ret = i915_gem_check_olr(ring, obj->last_write_seqno);
-       if (ret)
-               return ret;
-
-       if (WARN_ON(!ring->irq_get(ring)))
-               return 0;
-
-       return 1;
-}
+       uint32_t seqno;
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
-{
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
-       struct intel_crtc *intel_crtc;
-       unsigned long irq_flags;
-       u32 seqno;
-
-       seqno = ring->get_seqno(ring, false);
+       seqno = intel_crtc->mmio_flip.seqno;
+       ring = intel_crtc->mmio_flip.ring;
 
-       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
-       for_each_intel_crtc(ring->dev, intel_crtc) {
-               struct intel_mmio_flip *mmio_flip;
+       if (seqno)
+               WARN_ON(__i915_wait_seqno(ring, seqno,
+                                         intel_crtc->reset_counter,
+                                         false, NULL, NULL) != 0);
 
-               mmio_flip = &intel_crtc->mmio_flip;
-               if (mmio_flip->status != INTEL_MMIO_FLIP_WAIT_RING)
-                       continue;
-
-               if (ring->id != mmio_flip->ring_id)
-                       continue;
-
-               if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
-                       schedule_work(&intel_crtc->mmio_flip.work);
-                       mmio_flip->status = INTEL_MMIO_FLIP_WORK_SCHEDULED;
-                       ring->irq_put(ring);
-               }
-       }
-       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+       intel_do_mmio_flip(intel_crtc);
 }
 
 static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -9493,32 +9444,13 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
                                 struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int ret;
-
-       if (WARN_ON(intel_crtc->mmio_flip.status != INTEL_MMIO_FLIP_IDLE))
-               return -EBUSY;
-
-       ret = intel_postpone_flip(obj);
-       if (ret < 0)
-               return ret;
-       if (ret == 0) {
-               intel_do_mmio_flip(intel_crtc);
-               return 0;
-       }
 
-       spin_lock_irq(&dev_priv->mmio_flip_lock);
-       intel_crtc->mmio_flip.status = INTEL_MMIO_FLIP_WAIT_RING;
        intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
-       intel_crtc->mmio_flip.ring_id = obj->ring->id;
-       spin_unlock_irq(&dev_priv->mmio_flip_lock);
+       intel_crtc->mmio_flip.ring = obj->ring;
+
+       schedule_work(&intel_crtc->mmio_flip.work);
 
-       /*
-        * Double check to catch cases where irq fired before
-        * mmio flip data was ready
-        */
-       intel_notify_mmio_flip(obj->ring);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cb0e9dbf1c96152bbea910d0a67601199cad54d6..5c622ad2e9aa3c31a8c7245bbd30a46824772096 100644
@@ -399,16 +399,9 @@ struct intel_pipe_wm {
        bool sprites_scaled;
 };
 
-enum intel_mmio_flip_status {
-       INTEL_MMIO_FLIP_IDLE = 0,
-       INTEL_MMIO_FLIP_WAIT_RING,
-       INTEL_MMIO_FLIP_WORK_SCHEDULED,
-};
-
 struct intel_mmio_flip {
        u32 seqno;
-       u32 ring_id;
-       enum intel_mmio_flip_status status;
+       struct intel_engine_cs *ring;
        struct work_struct work;
 };