drm/i915: s/\<rq\>/req/g
authorDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 21 May 2015 12:21:25 +0000 (14:21 +0200)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 21 May 2015 13:10:48 +0000 (15:10 +0200)
The merged seqno->request conversion from John called request
variables req, but some (not all) of Chris' recent patches changed
those to just rq. We've had a lengthy (and inconclusive) discussion on
IRC about which is the more meaningful name, with maybe at most a slight bias
towards req.

Given that the "don't change names without good reason to avoid
conflicts" rule applies, let's go back to a req everywhere for
consistency. I'll sed any patches for which this will cause conflicts
before applying.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Harrison <John.C.Harrison@Intel.com>
[danvet: s/origina/merged/ as pointed out by Chris - the first
mass-conversion patch was from Chris, the merged one from John.]
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_pm.c

index a32b669bab89b6d9469297081102bb273b19c477..f465af1b02f8bf7e21b47a4bd5bca66a884d8c09 100644 (file)
@@ -665,7 +665,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
-       struct drm_i915_gem_request *rq;
+       struct drm_i915_gem_request *req;
        int ret, any, i;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -677,22 +677,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                int count;
 
                count = 0;
-               list_for_each_entry(rq, &ring->request_list, list)
+               list_for_each_entry(req, &ring->request_list, list)
                        count++;
                if (count == 0)
                        continue;
 
                seq_printf(m, "%s requests: %d\n", ring->name, count);
-               list_for_each_entry(rq, &ring->request_list, list) {
+               list_for_each_entry(req, &ring->request_list, list) {
                        struct task_struct *task;
 
                        rcu_read_lock();
                        task = NULL;
-                       if (rq->pid)
-                               task = pid_task(rq->pid, PIDTYPE_PID);
+                       if (req->pid)
+                               task = pid_task(req->pid, PIDTYPE_PID);
                        seq_printf(m, "    %x @ %d: %s [%d]\n",
-                                  rq->seqno,
-                                  (int) (jiffies - rq->emitted_jiffies),
+                                  req->seqno,
+                                  (int) (jiffies - req->emitted_jiffies),
                                   task ? task->comm : "<unknown>",
                                   task ? task->pid : -1);
                        rcu_read_unlock();
index 5ff96f94c2ee6dfe62da0026522e46f775bc8992..fa4429144cb9b04afc973c535f2cb8a8c3206917 100644 (file)
@@ -1178,16 +1178,16 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *rq)
+static int __i915_spin_request(struct drm_i915_gem_request *req)
 {
        unsigned long timeout;
 
-       if (i915_gem_request_get_ring(rq)->irq_refcount)
+       if (i915_gem_request_get_ring(req)->irq_refcount)
                return -EBUSY;
 
        timeout = jiffies + 1;
        while (!need_resched()) {
-               if (i915_gem_request_completed(rq, true))
+               if (i915_gem_request_completed(req, true))
                        return 0;
 
                if (time_after_eq(jiffies, timeout))
@@ -1195,7 +1195,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *rq)
 
                cpu_relax_lowlatency();
        }
-       if (i915_gem_request_completed(rq, false))
+       if (i915_gem_request_completed(req, false))
                return 0;
 
        return -EAGAIN;
@@ -2572,37 +2572,37 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
                           struct intel_context *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
-       struct drm_i915_gem_request *rq;
+       struct drm_i915_gem_request *req;
        int ret;
 
        if (ring->outstanding_lazy_request)
                return 0;
 
-       rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-       if (rq == NULL)
+       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+       if (req == NULL)
                return -ENOMEM;
 
-       kref_init(&rq->ref);
-       rq->i915 = dev_priv;
+       kref_init(&req->ref);
+       req->i915 = dev_priv;
 
-       ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
+       ret = i915_gem_get_seqno(ring->dev, &req->seqno);
        if (ret) {
-               kfree(rq);
+               kfree(req);
                return ret;
        }
 
-       rq->ring = ring;
+       req->ring = ring;
 
        if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(rq, ctx);
+               ret = intel_logical_ring_alloc_request_extras(req, ctx);
        else
-               ret = intel_ring_alloc_request_extras(rq);
+               ret = intel_ring_alloc_request_extras(req);
        if (ret) {
-               kfree(rq);
+               kfree(req);
                return ret;
        }
 
-       ring->outstanding_lazy_request = rq;
+       ring->outstanding_lazy_request = req;
        return 0;
 }
 
index c97b4963e5c13b814cd4b26c1f1f7b0ae3add742..7ef22db8cbbf1767f1c2d190ae778b0b26b18b80 100644 (file)
@@ -10785,14 +10785,14 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
        struct intel_mmio_flip *mmio_flip =
                container_of(work, struct intel_mmio_flip, work);
 
-       if (mmio_flip->rq)
-               WARN_ON(__i915_wait_request(mmio_flip->rq,
+       if (mmio_flip->req)
+               WARN_ON(__i915_wait_request(mmio_flip->req,
                                            mmio_flip->crtc->reset_counter,
                                            false, NULL, NULL));
 
        intel_do_mmio_flip(mmio_flip->crtc);
 
-       i915_gem_request_unreference__unlocked(mmio_flip->rq);
+       i915_gem_request_unreference__unlocked(mmio_flip->req);
        kfree(mmio_flip);
 }
 
@@ -10809,7 +10809,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
        if (mmio_flip == NULL)
                return -ENOMEM;
 
-       mmio_flip->rq = i915_gem_request_reference(obj->last_write_req);
+       mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
        mmio_flip->crtc = to_intel_crtc(crtc);
 
        INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
index 47bc729043c5522640cb6d28438e7d9601dd43d8..c3c42ead4b46380714b7f1dfbfc40b50e698a81b 100644 (file)
@@ -460,7 +460,7 @@ struct intel_pipe_wm {
 
 struct intel_mmio_flip {
        struct work_struct work;
-       struct drm_i915_gem_request *rq;
+       struct drm_i915_gem_request *req;
        struct intel_crtc *crtc;
 };
 
@@ -1366,7 +1366,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv);
 void intel_queue_rps_boost_for_request(struct drm_device *dev,
-                                      struct drm_i915_gem_request *rq);
+                                      struct drm_i915_gem_request *req);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
index a70b2d1fc84481473b9cce3af37e96e479905a1e..5dda008698a2955f202eec6be71645deebda4f6e 100644 (file)
@@ -6842,34 +6842,34 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 
 struct request_boost {
        struct work_struct work;
-       struct drm_i915_gem_request *rq;
+       struct drm_i915_gem_request *req;
 };
 
 static void __intel_rps_boost_work(struct work_struct *work)
 {
        struct request_boost *boost = container_of(work, struct request_boost, work);
 
-       if (!i915_gem_request_completed(boost->rq, true))
-               gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
+       if (!i915_gem_request_completed(boost->req, true))
+               gen6_rps_boost(to_i915(boost->req->ring->dev), NULL);
 
-       i915_gem_request_unreference__unlocked(boost->rq);
+       i915_gem_request_unreference__unlocked(boost->req);
        kfree(boost);
 }
 
 void intel_queue_rps_boost_for_request(struct drm_device *dev,
-                                      struct drm_i915_gem_request *rq)
+                                      struct drm_i915_gem_request *req)
 {
        struct request_boost *boost;
 
-       if (rq == NULL || INTEL_INFO(dev)->gen < 6)
+       if (req == NULL || INTEL_INFO(dev)->gen < 6)
                return;
 
        boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
        if (boost == NULL)
                return;
 
-       i915_gem_request_reference(rq);
-       boost->rq = rq;
+       i915_gem_request_reference(req);
+       boost->req = req;
 
        INIT_WORK(&boost->work, __intel_rps_boost_work);
        queue_work(to_i915(dev)->wq, &boost->work);