drm/i915: argument to control retiring behavior
author	Ben Widawsky <ben@bwidawsk.net>	Wed, 25 Jan 2012 23:39:34 +0000 (15:39 -0800)
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	Thu, 26 Jan 2012 10:19:19 +0000 (11:19 +0100)
Sometimes, when we idle the GPU or wait on something, we don't
actually want to process the retiring list. This patch adds a
do_retire argument to the idle/wait paths so that callers can choose
the behavior.
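For illustration only (not part of the patch itself), a minimal sketch
of the intended calling convention, assuming the usual struct_mutex
locking around these paths. Every caller converted in this patch passes
true and so keeps the old behavior; the false case below is a
hypothetical follow-on caller:

	/* Wait for the rings to drain and then retire completed
	 * requests, exactly as before this patch.
	 */
	ret = i915_gpu_idle(dev, true);
	if (ret)
		return ret;

	/* Hypothetical: wait for a specific seqno but skip retirement
	 * on this path; i915_gem_retire_requests_ring() is not called
	 * when do_retire is false.
	 */
	ret = i915_wait_request(ring, seqno, false);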

Reviewed-by: Keith Packard <keithp@keithp.com>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_overlay.c

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8122738db91ec5b6ddf14a0936316e68659c5e5c..3f27173fb5191580a86c38f52ac9db1adc5991d4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2131,7 +2131,7 @@ int i915_driver_unload(struct drm_device *dev)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gpu_idle(dev, true);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f02a5f525f031486bfe4afc99d13d30e2fd05858..1d10b8c26c98268aac46e8813e552f92139ef058 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1179,13 +1179,14 @@ void i915_gem_do_init(struct drm_device *dev,
                      unsigned long start,
                      unsigned long mappable_end,
                      unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
                                  struct drm_file *file,
                                  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-                                  uint32_t seqno);
+                                  uint32_t seqno,
+                                  bool do_retire);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eb98a7f55cfe7db8faff215fe36d91afe4be8a34..ff3066c4c76a23b5511aecc1b5d000913f931d41 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1943,7 +1943,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-                 uint32_t seqno)
+                 uint32_t seqno,
+                 bool do_retire)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 ier;
@@ -2027,7 +2028,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
-       if (ret == 0)
+       if (ret == 0 && do_retire)
                i915_gem_retire_requests_ring(ring);
 
        return ret;
@@ -2051,7 +2052,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
         * it.
         */
        if (obj->active) {
-               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+                                       true);
                if (ret)
                        return ret;
        }
@@ -2172,7 +2174,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 {
        int ret;
 
@@ -2186,18 +2188,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
                        return ret;
        }
 
-       return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+       return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+                                do_retire);
 }
 
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;
 
        /* Flush everything onto the inactive list. */
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               ret = i915_ring_idle(&dev_priv->ring[i]);
+               ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
                if (ret)
                        return ret;
        }
@@ -2400,7 +2402,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
                if (!ring_passed_seqno(obj->last_fenced_ring,
                                       obj->last_fenced_seqno)) {
                        ret = i915_wait_request(obj->last_fenced_ring,
-                                               obj->last_fenced_seqno);
+                                               obj->last_fenced_seqno,
+                                               true);
                        if (ret)
                                return ret;
                }
@@ -2541,7 +2544,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                                if (!ring_passed_seqno(obj->last_fenced_ring,
                                                       reg->setup_seqno)) {
                                        ret = i915_wait_request(obj->last_fenced_ring,
-                                                               reg->setup_seqno);
+                                                               reg->setup_seqno,
+                                                               true);
                                        if (ret)
                                                return ret;
                                }
@@ -3710,7 +3714,7 @@ i915_gem_idle(struct drm_device *dev)
                return 0;
        }
 
-       ret = i915_gpu_idle(dev);
+       ret = i915_gpu_idle(dev, true);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
@@ -4201,7 +4205,7 @@ rescan:
                 * This has a dramatic impact to reduce the number of
                 * OOM-killer events whilst running the GPU aggressively.
                 */
-               if (i915_gpu_idle(dev) == 0)
+               if (i915_gpu_idle(dev, true) == 0)
                        goto rescan;
        }
        mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ead5d00f91b043618f90457f10841b2a8505dbcc..097119caa36a55bfe28868016c364389695c796a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -195,7 +195,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
        trace_i915_gem_evict_everything(dev, purgeable_only);
 
        /* Flush everything (on to the inactive lists) and evict */
-       ret = i915_gpu_idle(dev);
+       ret = i915_gpu_idle(dev, true);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c01cb20184970e16e85f2ed3d4738deac7d5eff2..c649e0f255b48c354b827840a6b27437158876e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1186,7 +1186,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
-                       ret = i915_gpu_idle(dev);
+                       ret = i915_gpu_idle(dev, true);
                        if (ret)
                                goto err;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6042c5e6d2785c500e5c1391318aab686c442fa2..e050b903da95817783529a6003042e495e554d5c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,7 +55,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev)) {
+               if (i915_gpu_idle(dev_priv->dev, true)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index cdf17d4cc1f79d111e67ad66d4a8a64ce9c8c7bd..23a543cdfa99286cf62348ed7d3df0f7ed95bdb6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -227,7 +227,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
        }
        overlay->last_flip_req = request->seqno;
        overlay->flip_tail = tail;
-       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+                               true);
        if (ret)
                return ret;
 
@@ -448,7 +449,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
        if (overlay->last_flip_req == 0)
                return 0;
 
-       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+                               true);
        if (ret)
                return ret;