drm/i915: Remove the per-ring write list
author Chris Wilson <chris@chris-wilson.co.uk>
Fri, 20 Jul 2012 11:41:03 +0000 (12:41 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 25 Jul 2012 16:23:53 +0000 (18:23 +0200)
Flushing the objects on the per-ring write list is now handled by a single
global flag, which ensures we emit a flush before the next serialisation
point (if we failed to queue one previously).
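For illustration, a minimal self-contained C model of the flag-based scheme
that replaces the per-ring write list follows. This is a toy sketch, not
kernel code: the names gpu_caches_dirty, emit_flush() and flush_all_caches()
are illustrative stand-ins for the helpers used by the i915 code of this
period, and the real flush is an MI_FLUSH/PIPE_CONTROL emitted into the ring.

	/* Standalone model of flag-based flush tracking (illustrative only). */
	#include <stdbool.h>
	#include <stdio.h>

	struct intel_ring_buffer {
		/* Set whenever a batch may leave dirty data in the GPU
		 * caches; cleared once a flush has actually been emitted. */
		bool gpu_caches_dirty;
	};

	/* Stand-in for emitting a flush command into the ring. */
	static int emit_flush(struct intel_ring_buffer *ring)
	{
		(void)ring;
		printf("flush emitted\n");
		return 0;
	}

	/* Called at any serialisation point (request emission, ring idle):
	 * emit the flush we may have failed to queue earlier, then clear
	 * the flag so the flush happens exactly once. */
	static int flush_all_caches(struct intel_ring_buffer *ring)
	{
		int ret;

		if (!ring->gpu_caches_dirty)
			return 0;

		ret = emit_flush(ring);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
		return 0;
	}

	int main(void)
	{
		struct intel_ring_buffer ring = { .gpu_caches_dirty = false };

		/* A batch completes with a GPU write domain pending... */
		ring.gpu_caches_dirty = true;

		/* ...so the next serialisation point emits the flush once. */
		flush_all_caches(&ring);
		flush_all_caches(&ring); /* no-op: flag already cleared */
		return 0;
	}

Because a GPU cache flush is global anyway, a single per-ring flag carries
the same information as walking every object on gpu_write_list, which is why
the list and all of its bookkeeping below can go.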

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b91755f77436f6a22c515bc4255732e1ed3a793..59e3199da162091c03b4c91e0eabc2e2602ede85 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -865,8 +865,6 @@ struct drm_i915_gem_object {
        /** This object's place on the active/inactive lists */
        struct list_head ring_list;
        struct list_head mm_list;
-       /** This object's place on GPU write list */
-       struct list_head gpu_write_list;
        /** This object's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f62dd298a65d1849343a7d09d8b426cb8d510da5..78fa9503a34d54c1c0aebd54d66a97361bd8d338 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1465,7 +1465,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 
        list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       BUG_ON(!list_empty(&obj->gpu_write_list));
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
@@ -1511,30 +1510,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
        return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-                              uint32_t flush_domains)
-{
-       struct drm_i915_gem_object *obj, *next;
-
-       list_for_each_entry_safe(obj, next,
-                                &ring->gpu_write_list,
-                                gpu_write_list) {
-               if (obj->base.write_domain & flush_domains) {
-                       uint32_t old_write_domain = obj->base.write_domain;
-
-                       obj->base.write_domain = 0;
-                       list_del_init(&obj->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, ring,
-                                                      i915_gem_next_request_seqno(ring));
-
-                       trace_i915_gem_object_change_domain(obj,
-                                                           obj->base.read_domains,
-                                                           old_write_domain);
-               }
-       }
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1637,8 +1612,6 @@ i915_add_request(struct intel_ring_buffer *ring,
                                           &dev_priv->mm.retire_work, HZ);
        }
 
-       WARN_ON(!list_empty(&ring->gpu_write_list));
-
        return 0;
 }
 
@@ -1680,7 +1653,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                       struct drm_i915_gem_object,
                                       ring_list);
 
-               list_del_init(&obj->gpu_write_list);
                i915_gem_object_move_to_inactive(obj);
        }
 }
@@ -2011,11 +1983,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
        u32 seqno;
        int ret;
 
-       /* This function only exists to support waiting for existing rendering,
-        * not for emitting required flushes.
-        */
-       BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
@@ -2308,26 +2275,14 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       if (flush_domains & I915_GEM_GPU_DOMAINS)
-               i915_gem_process_flushing_list(ring, flush_domains);
-
        return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-       int ret;
-
-       if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+       if (list_empty(&ring->active_list))
                return 0;
 
-       if (!list_empty(&ring->gpu_write_list)) {
-               ret = i915_gem_flush_ring(ring,
-                                   I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-               if (ret)
-                       return ret;
-       }
-
        return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2343,10 +2298,6 @@ int i915_gpu_idle(struct drm_device *dev)
                if (ret)
                        return ret;
 
-               /* Is the device fubar? */
-               if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-                       return -EBUSY;
-
                ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
                if (ret)
                        return ret;
@@ -3491,7 +3442,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        INIT_LIST_HEAD(&obj->gtt_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
-       INIT_LIST_HEAD(&obj->gpu_write_list);
        obj->madv = I915_MADV_WILLNEED;
        /* Avoid an unnecessary call to unbind on the first bind. */
        obj->map_and_fenceable = true;
@@ -3912,7 +3862,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
 {
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2353e6ee2f0d9e6c79002f7c9c8b23e0c7d53d72..36c940c1a978d1169ef9453219bd35d4e3dcf109 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,9 +943,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
        struct drm_i915_gem_object *obj;
 
        list_for_each_entry(obj, objects, exec_list) {
-                 u32 old_read = obj->base.read_domains;
-                 u32 old_write = obj->base.write_domain;
-
+               u32 old_read = obj->base.read_domains;
+               u32 old_write = obj->base.write_domain;
 
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->base.write_domain = obj->base.pending_write_domain;
@@ -955,8 +954,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = seqno;
-                       list_move_tail(&obj->gpu_write_list,
-                                      &ring->gpu_write_list);
                        if (obj->pin_count) /* check for potential scanout */
                                intel_mark_busy(ring->dev, obj);
                }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a96d5308c48d273cb492081a8d415ba188..8f221d9a7bdbf93f500212ea56a76d16ac786632 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1002,7 +1002,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->gpu_write_list);
        ring->size = 32 * PAGE_SIZE;
 
        init_waitqueue_head(&ring->irq_queue);
@@ -1473,7 +1472,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->gpu_write_list);
 
        ring->size = size;
        ring->effective_size = ring->size;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92087d2caf58c3d1b79753f5c75319..7986f3001cf07b4f2fc2be69b6596858a39d0a8d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,15 +100,6 @@ struct  intel_ring_buffer {
         */
        struct list_head request_list;
 
-       /**
-        * List of objects currently pending a GPU write flush.
-        *
-        * All elements on this list will belong to either the
-        * active_list or flushing_list, last_rendering_seqno can
-        * be used to differentiate between the two elements.
-        */
-       struct list_head gpu_write_list;
-
        /**
         * Do we have some not yet emitted requests outstanding?
         */