drm/i915: Move gpu_write_list to per-ring
author		Chris Wilson <chris@chris-wilson.co.uk>
		Sun, 24 Oct 2010 11:38:05 +0000 (12:38 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
		Sun, 24 Oct 2010 19:22:51 +0000 (20:22 +0100)
... to prevent flush processing of an idle (or even absent) ring.

This fixes a regression during suspend introduced by commit 87acb0a5.
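
With the list kept per-ring, an idle ring (or one never initialised
because the hardware lacks it) simply has an empty gpu_write_list, and
i915_ring_idle() can return before emitting any flush. That matters on
suspend because the idle path walks every ring unconditionally; roughly
(a simplified sketch of the caller, not the exact code in this tree):

    static int i915_gpu_idle(struct drm_device *dev)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            int ret;

            /* Flush everything onto the inactive list.  On hardware
             * without a BSD or BLT ring, those gpu_write_lists were
             * initialised in i915_gem_load() but can never gain an
             * entry, so the list_empty() check in i915_ring_idle()
             * turns these calls into no-ops. */
            ret = i915_ring_idle(dev, &dev_priv->render_ring);
            if (ret)
                    return ret;

            ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
            if (ret)
                    return ret;

            return i915_ring_idle(dev, &dev_priv->blt_ring);
    }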

Reported-and-tested-by: Alexey Fisher <bug-track@fisher-privat.net>
Tested-by: Peter Clifton <pcjc2@cam.ac.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cc9cb0dda6fc129197f50ac373b92de477c7ca22..2c2c19b6285ecf331edbac9c53e0bdd44b2093ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -570,15 +570,6 @@ typedef struct drm_i915_private {
                 */
                struct list_head flushing_list;
 
-               /**
-                * List of objects currently pending a GPU write flush.
-                *
-                * All elements on this list will belong to either the
-                * active_list or flushing_list, last_rendering_seqno can
-                * be used to differentiate between the two elements.
-                */
-               struct list_head gpu_write_list;
-
                /**
                 * LRU list of objects which are not in the ringbuffer and
                 * are ready to unbind, but are still in the GTT.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e7f27a5b89dcef86b70421d4b3c62a1232e063c4..6c2618d884e7a841ce9ae97e9abd0ab6e46da6d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1657,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        struct drm_i915_gem_object *obj_priv, *next;
 
        list_for_each_entry_safe(obj_priv, next,
-                                &dev_priv->mm.gpu_write_list,
+                                &ring->gpu_write_list,
                                 gpu_write_list) {
                struct drm_gem_object *obj = &obj_priv->base;
 
-               if (obj->write_domain & flush_domains &&
-                   obj_priv->ring == ring) {
+               if (obj->write_domain & flush_domains) {
                        uint32_t old_write_domain = obj->write_domain;
 
                        obj->write_domain = 0;
@@ -2173,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
                          struct intel_ring_buffer *ring)
 {
+       if (list_empty(&ring->gpu_write_list))
+               return 0;
+
        i915_gem_flush_ring(dev, NULL, ring,
                            I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        return i915_wait_request(dev,
@@ -3786,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                uint32_t old_write_domain = obj->write_domain;
-
                obj->write_domain = obj->pending_write_domain;
-               if (obj->write_domain)
-                       list_move_tail(&obj_priv->gpu_write_list,
-                                      &dev_priv->mm.gpu_write_list);
-
                trace_i915_gem_object_change_domain(obj,
                                                    obj->read_domains,
                                                    old_write_domain);
@@ -3858,9 +3854,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               obj_priv = to_intel_bo(obj);
 
                i915_gem_object_move_to_active(obj, ring);
+               if (obj->write_domain)
+                       list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+                                      &ring->gpu_write_list);
        }
 
        i915_add_request(dev, file, request, ring);
@@ -4618,6 +4616,14 @@ i915_gem_lastclose(struct drm_device *dev)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4626,17 +4632,13 @@ i915_gem_load(struct drm_device *dev)
 
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-       INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-       INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-       INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-       INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-       INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-       INIT_LIST_HEAD(&dev_priv->blt_ring.active_list);
-       INIT_LIST_HEAD(&dev_priv->blt_ring.request_list);
+       init_ring_lists(&dev_priv->render_ring);
+       init_ring_lists(&dev_priv->bsd_ring);
+       init_ring_lists(&dev_priv->blt_ring);
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4b53ca81ea4d07649e84142162f3117aa3f0007d..09f2dc353ae239f0d2a6f86a5940c1d7b27656e4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -580,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
 
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6ab40c6058f7961bff84c6ce0ddd4b5ea959d91a..a05aff0e5764d67e421a5a4995f2b0b92dfcd925 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -82,6 +82,15 @@ struct  intel_ring_buffer {
         */
        struct list_head request_list;
 
+       /**
+        * List of objects currently pending a GPU write flush.
+        *
+        * All elements on this list will belong to either the
+        * active_list or flushing_list, last_rendering_seqno can
+        * be used to differentiate between the two elements.
+        */
+       struct list_head gpu_write_list;
+
        /**
         * Do we have some not yet emitted requests outstanding?
         */