drm/i915: Split i915_gem_flush_ring() into separate invalidate/flush funcs
author		Chris Wilson <chris@chris-wilson.co.uk>
		Fri, 20 Jul 2012 11:41:08 +0000 (12:41 +0100)
committer	Daniel Vetter <daniel.vetter@ffwll.ch>
		Wed, 25 Jul 2012 16:23:55 +0000 (18:23 +0200)
By moving the function to intel_ringbuffer.c and currying the
appropriate domain parameters, we hopefully make the callsites easier
to read and understand.
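
For illustration (not part of the patch), each new helper is the old
call with its domain arguments pre-bound; on success both also clear
ring->gpu_caches_dirty:

	/* Sketch only; the helpers are defined in intel_ringbuffer.c
	 * in the hunks below.
	 */

	/* Before (in i915_add_request): */
	ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
	/* After: */
	ret = intel_ring_flush_all_caches(ring);

	/* Before (in i915_gem_execbuffer_move_to_gpu): */
	ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS,
				  ring->gpu_caches_dirty ?
				  I915_GEM_GPU_DOMAINS : 0);
	/* After: */
	ret = intel_ring_invalidate_all_caches(ring);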

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 59e3199da162091c03b4c91e0eabc2e2602ede85..700dc838c57fe70bd3b5f05904fa614618e643f4 100644
@@ -1256,9 +1256,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-                                    uint32_t invalidate_domains,
-                                    uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3659d47a9f6e5585cc0c2a6352fc2630430a4e39..f26e2b201badbd43d950660774bba985142337f6 100644
@@ -1549,13 +1549,9 @@ i915_add_request(struct intel_ring_buffer *ring,
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
-       if (ring->gpu_caches_dirty) {
-               ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-               if (ret)
-                       return ret;
-
-               ring->gpu_caches_dirty = false;
-       }
+       ret = intel_ring_flush_all_caches(ring);
+       if (ret)
+               return ret;
 
        if (request == NULL) {
                request = kmalloc(sizeof(*request), GFP_KERNEL);
@@ -2254,25 +2250,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        return ret;
 }
 
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-                   uint32_t invalidate_domains,
-                   uint32_t flush_domains)
-{
-       int ret;
-
-       if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-               return 0;
-
-       trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-       ret = ring->flush(ring, invalidate_domains, flush_domains);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
        if (list_empty(&ring->active_list))
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 55a94c1a1f5930ebc013a1da9c96a8218f7f6b7a..6be1a8920a8493dac24678217b20e9750483b399 100644
@@ -707,14 +707,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       ret = i915_gem_flush_ring(ring,
-                                 I915_GEM_GPU_DOMAINS,
-                                 ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-       if (ret)
-               return ret;
-
-       ring->gpu_caches_dirty = false;
-       return 0;
+       return intel_ring_invalidate_all_caches(ring);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8f221d9a7bdbf93f500212ea56a76d16ac786632..8b7085e4cf84383730535ec77708634354a72469 100644
@@ -1564,3 +1564,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
        return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+       int ret;
+
+       if (!ring->gpu_caches_dirty)
+               return 0;
+
+       ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+       if (ret)
+               return ret;
+
+       trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+       ring->gpu_caches_dirty = false;
+       return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+       uint32_t flush_domains;
+       int ret;
+
+       flush_domains = 0;
+       if (ring->gpu_caches_dirty)
+               flush_domains = I915_GEM_GPU_DOMAINS;
+
+       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+       if (ret)
+               return ret;
+
+       trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+       ring->gpu_caches_dirty = false;
+       return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7986f3001cf07b4f2fc2be69b6596858a39d0a8d..8b2b92e00e9d9f963f07263650cb2aa355917971 100644
@@ -195,6 +195,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
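
As a usage sketch (illustrative, condensed from the hunks above), the
pair brackets each batch:

	/* Start of a batch, in i915_gem_execbuffer_move_to_gpu():
	 * unconditionally invalidate, and also flush if the previous
	 * batch left the GPU caches dirty.
	 */
	ret = intel_ring_invalidate_all_caches(ring);

	/* The batch runs; work that dirties the GPU caches sets
	 * ring->gpu_caches_dirty (done elsewhere, not shown in this
	 * patch).
	 */

	/* When emitting the request, in i915_add_request(): the flush
	 * must happen before the next request, but becomes a no-op
	 * when nothing is dirty.
	 */
	ret = intel_ring_flush_all_caches(ring);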