static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- struct drm_i915_gem_request *rq;
+ struct drm_i915_gem_request *req;
int ret, any, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
any = 0;
for_each_ring(ring, dev_priv, i) {
int count;
count = 0;
- list_for_each_entry(rq, &ring->request_list, list)
+ list_for_each_entry(req, &ring->request_list, list)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", ring->name, count);
- list_for_each_entry(rq, &ring->request_list, list) {
+ list_for_each_entry(req, &ring->request_list, list) {
struct task_struct *task;
rcu_read_lock();
task = NULL;
- if (rq->pid)
- task = pid_task(rq->pid, PIDTYPE_PID);
+ if (req->pid)
+ task = pid_task(req->pid, PIDTYPE_PID);
seq_printf(m, " %x @ %d: %s [%d]\n",
- rq->seqno,
- (int) (jiffies - rq->emitted_jiffies),
+ req->seqno,
+ (int) (jiffies - req->emitted_jiffies),
task ? task->comm : "<unknown>",
task ? task->pid : -1);
rcu_read_unlock();
}
any++;
}
mutex_unlock(&dev->struct_mutex);
if (any == 0)
seq_puts(m, "No requests\n");
return 0;
}
static bool missed_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
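/*
 * Busy-wait briefly for the request to complete: returns 0 on completion,
 * -EBUSY if the ring's irq reference is already held (a full wait is in
 * progress, so spinning is pointless), or -EAGAIN if the spin times out.
 */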
-static int __i915_spin_request(struct drm_i915_gem_request *rq)
+static int __i915_spin_request(struct drm_i915_gem_request *req)
{
unsigned long timeout;
- if (i915_gem_request_get_ring(rq)->irq_refcount)
+ if (i915_gem_request_get_ring(req)->irq_refcount)
return -EBUSY;
timeout = jiffies + 1;
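/* Spin for at most one jiffy, bailing out early if the scheduler wants the CPU. */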
while (!need_resched()) {
- if (i915_gem_request_completed(rq, true))
+ if (i915_gem_request_completed(req, true))
return 0;
if (time_after_eq(jiffies, timeout))
break;
cpu_relax_lowlatency();
}
- if (i915_gem_request_completed(rq, false))
+ if (i915_gem_request_completed(req, false))
return 0;
return -EAGAIN;
}
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct drm_i915_gem_request *rq;
+ struct drm_i915_gem_request *req;
int ret;
if (ring->outstanding_lazy_request)
return 0;
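/* Allocate a zeroed request from the slab cache; the kref below is the first reference. */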
- rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
- if (rq == NULL)
+ req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+ if (req == NULL)
return -ENOMEM;
- kref_init(&rq->ref);
- rq->i915 = dev_priv;
+ kref_init(&req->ref);
+ req->i915 = dev_priv;
- ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
+ ret = i915_gem_get_seqno(ring->dev, &req->seqno);
if (ret) {
- kfree(rq);
+ kfree(req);
return ret;
}
- rq->ring = ring;
+ req->ring = ring;
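/* Execlists and legacy ringbuffer submission attach different per-request state. */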
if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(rq, ctx);
+ ret = intel_logical_ring_alloc_request_extras(req, ctx);
else
- ret = intel_ring_alloc_request_extras(rq);
+ ret = intel_ring_alloc_request_extras(req);
if (ret) {
- kfree(rq);
+ kfree(req);
return ret;
}
- ring->outstanding_lazy_request = rq;
+ ring->outstanding_lazy_request = req;
return 0;
}
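/*
 * Worker: wait for the last write request to the framebuffer to complete,
 * perform the flip via mmio, then drop the request reference (using the
 * __unlocked variant, as struct_mutex is not held here).
 */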
static void intel_mmio_flip_work_func(struct work_struct *work)
{
struct intel_mmio_flip *mmio_flip =
container_of(work, struct intel_mmio_flip, work);
- if (mmio_flip->rq)
- WARN_ON(__i915_wait_request(mmio_flip->rq,
+ if (mmio_flip->req)
+ WARN_ON(__i915_wait_request(mmio_flip->req,
mmio_flip->crtc->reset_counter,
false, NULL, NULL));
intel_do_mmio_flip(mmio_flip->crtc);
- i915_gem_request_unreference__unlocked(mmio_flip->rq);
+ i915_gem_request_unreference__unlocked(mmio_flip->req);
kfree(mmio_flip);
}
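/* Queue side: pin the last write request with a reference and hand it to the worker. */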
struct intel_mmio_flip *mmio_flip;
mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
if (mmio_flip == NULL)
return -ENOMEM;
- mmio_flip->rq = i915_gem_request_reference(obj->last_write_req);
+ mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
mmio_flip->crtc = to_intel_crtc(crtc);
INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
struct intel_mmio_flip {
struct work_struct work;
- struct drm_i915_gem_request *rq;
+ struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
};
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *rq);
+ struct drm_i915_gem_request *req);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct request_boost {
struct work_struct work;
- struct drm_i915_gem_request *rq;
+ struct drm_i915_gem_request *req;
};
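/*
 * Worker: if the tracked request is still in flight, ask for an RPS
 * frequency boost, then drop the reference taken when the boost was queued.
 */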
static void __intel_rps_boost_work(struct work_struct *work)
{
struct request_boost *boost = container_of(work, struct request_boost, work);
- if (!i915_gem_request_completed(boost->rq, true))
- gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
+ if (!i915_gem_request_completed(boost->req, true))
+ gen6_rps_boost(to_i915(boost->req->ring->dev), NULL);
- i915_gem_request_unreference__unlocked(boost->rq);
+ i915_gem_request_unreference__unlocked(boost->req);
kfree(boost);
}
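/*
 * Queue a worker to boost the GPU frequency while @req is outstanding.
 * RPS only exists on gen6+, so bail out on older hardware.
 */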
void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *rq)
+ struct drm_i915_gem_request *req)
{
struct request_boost *boost;
- if (rq == NULL || INTEL_INFO(dev)->gen < 6)
+ if (req == NULL || INTEL_INFO(dev)->gen < 6)
return;
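/* GFP_ATOMIC, as this may be called from contexts that cannot sleep. */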
boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
if (boost == NULL)
return;
- i915_gem_request_reference(rq);
- boost->rq = rq;
+ i915_gem_request_reference(req);
+ boost->req = req;
INIT_WORK(&boost->work, __intel_rps_boost_work);
queue_work(to_i915(dev)->wq, &boost->work);