drm/i915: Remove the defunct flushing list
author    Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 20 Jul 2012 11:41:02 +0000 (12:41 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Wed, 25 Jul 2012 16:23:52 +0000 (18:23 +0200)
As we guarantee to emit a flush before emitting the breadcrumb or
the next batchbuffer, there is no further need for the flushing list.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
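
The guarantee referred to in the commit message is established on the request-emission path: the GPU caches for a ring are flushed before the breadcrumb (seqno write) for a request or the next batchbuffer is emitted, so by the time an object's last_read_seqno retires there is no outstanding GPU write left to track. The sketch below illustrates only that ordering; it is not part of this patch and the helper names (flush_ring_caches, emit_breadcrumb, add_request_sketch) are hypothetical placeholders.

/*
 * Illustrative sketch only -- not part of this patch.  It shows the
 * ordering that makes the flushing list redundant: caches are flushed
 * before the breadcrumb that marks the request complete, so a retired
 * object never has a pending GPU write domain.
 */
static int add_request_sketch(struct intel_ring_buffer *ring,
                              struct drm_i915_gem_request *request)
{
        int ret;

        /* 1. Flush all pending GPU caches on this ring. */
        ret = flush_ring_caches(ring);          /* hypothetical helper */
        if (ret)
                return ret;

        /* 2. Only then emit the breadcrumb (seqno write + user interrupt). */
        return emit_breadcrumb(ring, request);  /* hypothetical helper */
}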

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a8b7db6161ca8a8c98681a56ee12afa52e30b078..1312b79c70b387965c0f507b8684c4dcba13df38 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@
 
 enum {
        ACTIVE_LIST,
-       FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
 };
@@ -178,10 +177,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
-       case FLUSHING_LIST:
-               seq_printf(m, "Flushing:\n");
-               head = &dev_priv->mm.flushing_list;
-               break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
@@ -239,7 +234,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.active_list, mm_list);
-       count_objects(&dev_priv->mm.flushing_list, mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
@@ -2007,7 +2001,6 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
        {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-       {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49a532e338e65feaa41720e0f1d3db9319849095..6b91755f77436f6a22c515bc4255732e1ed3a793 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -695,17 +695,6 @@ typedef struct drm_i915_private {
                 */
                struct list_head active_list;
 
-               /**
-                * List of objects which are not in the ringbuffer but which
-                * still have a write_domain which needs to be flushed before
-                * unbinding.
-                *
-                * last_rendering_seqno is 0 while an object is in this list.
-                *
-                * A reference is held on the buffer while on this list.
-                */
-               struct list_head flushing_list;
-
                /**
                 * LRU list of objects which are not in the ringbuffer and
                 * are ready to unbind, but are still in the GTT.
@@ -873,7 +862,7 @@ struct drm_i915_gem_object {
        struct drm_mm_node *gtt_space;
        struct list_head gtt_list;
 
-       /** This object's place on the active/flushing/inactive lists */
+       /** This object's place on the active/inactive lists */
        struct list_head ring_list;
        struct list_head mm_list;
        /** This object's place on GPU write list */
@@ -882,9 +871,9 @@ struct drm_i915_gem_object {
        struct list_head exec_list;
 
        /**
-        * This is set if the object is on the active or flushing lists
-        * (has pending rendering), and is not set if it's on inactive (ready
-        * to be unbound).
+        * This is set if the object is on the active lists (has pending
+        * rendering and so a non-zero seqno), and is not set if it is on
+        * the inactive list (ready to be unbound).
         */
        unsigned int active:1;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6a80d6565ef23616189b02a3eb68856dd7c08d18..f62dd298a65d1849343a7d09d8b426cb8d510da5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1457,27 +1457,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 }
 
-static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-       list_del_init(&obj->ring_list);
-       obj->last_read_seqno = 0;
-       obj->last_write_seqno = 0;
-       obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
-       struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       BUG_ON(!obj->active);
-       list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-       i915_gem_object_move_off_active(obj);
-}
-
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -1487,10 +1466,17 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
        BUG_ON(!list_empty(&obj->gpu_write_list));
+       BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
+
+       list_del_init(&obj->ring_list);
        obj->ring = NULL;
 
-       i915_gem_object_move_off_active(obj);
+       obj->last_read_seqno = 0;
+       obj->last_write_seqno = 0;
+       obj->base.write_domain = 0;
+
+       obj->last_fenced_seqno = 0;
        obj->fenced_gpu_access = false;
 
        obj->active = 0;
@@ -1694,7 +1680,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                       struct drm_i915_gem_object,
                                       ring_list);
 
-               obj->base.write_domain = 0;
                list_del_init(&obj->gpu_write_list);
                i915_gem_object_move_to_inactive(obj);
        }
@@ -1731,20 +1716,6 @@ void i915_gem_reset(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_lists(dev_priv, ring);
 
-       /* Remove anything from the flushing lists. The GPU cache is likely
-        * to be lost on reset along with the data, so simply move the
-        * lost bo to the inactive list.
-        */
-       while (!list_empty(&dev_priv->mm.flushing_list)) {
-               obj = list_first_entry(&dev_priv->mm.flushing_list,
-                                     struct drm_i915_gem_object,
-                                     mm_list);
-
-               obj->base.write_domain = 0;
-               list_del_init(&obj->gpu_write_list);
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
@@ -1815,10 +1786,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                if (!i915_seqno_passed(seqno, obj->last_read_seqno))
                        break;
 
-               if (obj->base.write_domain != 0)
-                       i915_gem_object_move_to_flushing(obj);
-               else
-                       i915_gem_object_move_to_inactive(obj);
+               i915_gem_object_move_to_inactive(obj);
        }
 
        if (unlikely(ring->trace_irq_seqno &&
@@ -3897,7 +3865,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
-       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        mutex_unlock(&dev->struct_mutex);
 
@@ -3955,7 +3922,6 @@ i915_gem_load(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
@@ -4206,12 +4172,7 @@ static int
 i915_gpu_is_active(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int lists_empty;
-
-       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
-
-       return !lists_empty;
+       return !list_empty(&dev_priv->mm.active_list);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index eba0308f10e3d5cdad1c8c9b71333bfb46c57896..51e547c4ed89c02b970210d4321bc54d3ea0066f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -93,23 +93,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 
        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               /* Does the object require an outstanding flush? */
-               if (obj->base.write_domain)
-                       continue;
-
-               if (mark_free(obj, &unwind_list))
-                       goto found;
-       }
-
-       /* Finally add anything with a pending flush (in order of retirement) */
-       list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-               if (mark_free(obj, &unwind_list))
-                       goto found;
-       }
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               if (!obj->base.write_domain)
-                       continue;
-
                if (mark_free(obj, &unwind_list))
                        goto found;
        }
@@ -172,7 +155,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
        int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                      list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return -ENOSPC;
@@ -189,8 +171,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
        i915_gem_retire_requests(dev);
 
-       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list, mm_list) {
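
With the flushing list gone, object retirement collapses to a single active-to-inactive transition; the i915_gem_retire_requests_ring() hunk above reduces to the shape sketched below. This is a condensed illustration of the patched logic, not literal kernel code; retire_object_sketch is a hypothetical name.

/*
 * Sketch of the simplified lifecycle after this patch: an object is
 * either active (awaiting retirement on a ring) or inactive (idle and
 * ready to be unbound).  The intermediate "flushing" state no longer
 * exists, so retirement needs no write-domain check.
 */
static void retire_object_sketch(struct drm_i915_gem_object *obj,
                                 u32 completed_seqno)
{
        if (i915_seqno_passed(completed_seqno, obj->last_read_seqno))
                i915_gem_object_move_to_inactive(obj);  /* active -> inactive */
}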