drm/i915: change i915_add_request to macro
author Mika Kuoppala <mika.kuoppala@linux.intel.com>
Wed, 12 Jun 2013 09:35:30 +0000 (12:35 +0300)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 13 Jun 2013 15:42:15 +0000 (17:42 +0200)
Only execbuffer needs all the parameters of i915_add_request(). By
wrapping __i915_add_request in the i915_add_request() macro, all
current callsites become cleaner. A following patch will introduce a
new parameter for __i915_add_request; with this patch in place, only
the relevant callsite will reflect that change, making the commit
smaller and easier to understand.
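For illustration, here is a minimal userspace sketch of the same
wrapper-macro pattern (a toy example, not the driver code: struct ring,
struct file, __add_request and add_request below are made-up stand-ins
for the i915 types and functions):

    #include <stdio.h>
    #include <stddef.h>

    struct ring { int id; };
    struct file { int fd; };

    /* Full-parameter version; only the "execbuffer"-style caller needs it all. */
    static int __add_request(struct ring *ring, struct file *file,
                             unsigned int *out_seqno)
    {
            static unsigned int next_seqno = 1;
            unsigned int seqno = next_seqno++;

            if (out_seqno)
                    *out_seqno = seqno;

            printf("request %u on ring %d (file %p)\n",
                   seqno, ring->id, (void *)file);
            return 0;
    }

    /* Common case: no file association needed. */
    #define add_request(ring, seqno) \
            __add_request(ring, NULL, seqno)

    int main(void)
    {
            struct ring r = { .id = 0 };
            struct file f = { .fd = 3 };
            unsigned int seqno;

            add_request(&r, NULL);         /* typical callsite: no file, no seqno */
            add_request(&r, &seqno);       /* caller that only wants the seqno back */
            __add_request(&r, &f, &seqno); /* the one full-parameter caller */
            return 0;
    }

When a new parameter is later added to __add_request, only its
definition, the macro, and the single full-parameter caller change;
every callsite that goes through the macro stays untouched.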

v2: _i915_add_request as function name (Chris Wilson)

v3: change name to __i915_add_request and fix ordering of params (Ben Widawsky)

Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cfa9286436516822990a676dada5995d4d04b59a..425200bc1e44f1e25d44ade58a09813f0b3ade97 100644
@@ -1756,9 +1756,11 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int i915_add_request(struct intel_ring_buffer *ring,
-                    struct drm_file *file,
-                    u32 *seqno);
+int __i915_add_request(struct intel_ring_buffer *ring,
+                      struct drm_file *file,
+                      u32 *seqno);
+#define i915_add_request(ring, seqno) \
+       __i915_add_request(ring, NULL, seqno)
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58048d49256c2cffa263ba008db596809df866c0..38e20875c25572954d5360c12b287d9e32717f58 100644
@@ -959,7 +959,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 
        ret = 0;
        if (seqno == ring->outstanding_lazy_request)
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
 
        return ret;
 }
@@ -2000,10 +2000,9 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
        return 0;
 }
 
-int
-i915_add_request(struct intel_ring_buffer *ring,
-                struct drm_file *file,
-                u32 *out_seqno)
+int __i915_add_request(struct intel_ring_buffer *ring,
+                      struct drm_file *file,
+                      u32 *out_seqno)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
@@ -2280,7 +2279,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                if (ring->gpu_caches_dirty)
-                       i915_add_request(ring, NULL, NULL);
+                       i915_add_request(ring, NULL);
 
                idle &= list_empty(&ring->request_list);
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f5ea3c1351f8434ff128e2aec3d67a4d3efd44e3..ff471454968dcb309d01556847fc46ed540e250e 100644
@@ -455,7 +455,7 @@ static int do_switch(struct i915_hw_context *to)
                from->obj->dirty = 1;
                BUG_ON(from->obj->ring != ring);
 
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
                if (ret) {
                        /* Too late, we've already scheduled a context switch.
                         * Try to undo the change so that the hw state is
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c98333d7411123f049826a6d1269347e6754f18e..d79ac7aa55d4faf1218189020fde572ffb05ad8e 100644
@@ -802,7 +802,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
        ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)i915_add_request(ring, file, NULL);
+       (void)__i915_add_request(ring, file, NULL);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 836794b68fc6cb9df13b6ee578ff5c1999bfe401..a3698812e9c7831f75048508b4b38ebf93ecce33 100644
@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
        int ret;
 
        BUG_ON(overlay->last_flip_req);
-       ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+       ret = i915_add_request(ring, &overlay->last_flip_req);
        if (ret)
                return ret;
 
@@ -286,7 +286,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        intel_ring_emit(ring, flip_addr);
        intel_ring_advance(ring);
 
-       return i915_add_request(ring, NULL, &overlay->last_flip_req);
+       return i915_add_request(ring, &overlay->last_flip_req);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a3cfa35b05738cddc6b49b806673d81ebbd598b8..e51ab552046c9ef943ec7d7bb1de5b5e24467154 100644
@@ -1512,7 +1512,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 
        /* We need to add any requests required to flush the objects and ring */
        if (ring->outstanding_lazy_request) {
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
        }