/** Position in the ringbuffer of the end of the whole request */
u32 tail;
- /** Context related to this request */
+ /** Context and ring buffer related to this request */
struct intel_context *ctx;
+ struct intel_ringbuffer *ringbuf;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
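For orientation, the request struct after this hunk reads roughly as follows (a sketch assembled only from the lines above; all neighbouring fields are elided):

struct drm_i915_gem_request {
	...
	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/** Context and ring buffer related to this request */
	struct intel_context *ctx;
	struct intel_ringbuffer *ringbuf;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;
	...
};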
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- struct intel_ringbuffer *ringbuf;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);

trace_i915_gem_request_retire(request);
- /* This is one of the few common intersection points
- * between legacy ringbuffer submission and execlists:
- * we need to tell them apart in order to find the correct
- * ringbuffer to which the request belongs to.
- */
- if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
-
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->postfix;
+ request->ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
return ret;
}
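For context, last_retired_head feeds the ring-space bookkeeping: the next time the driver recomputes free space, it promotes the retired position to the new known GPU head. A simplified sketch of that consumer, loosely modelled on the driver's ring-space update of this era (hypothetical helper name, and a simplified size calculation that assumes a power-of-two ring size; not verbatim kernel code):

static void update_ring_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	/* Free bytes between head and tail, modulo the ring size; the
	 * small reserve keeps a full ring distinguishable from an empty
	 * one. */
	ringbuf->space = (ringbuf->head - (ringbuf->tail + 64)) &
			 (ringbuf->size - 1);
}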
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
+ request->ringbuf = ctx->engine[ring->id].ringbuf;
ring->outstanding_lazy_request = request;
return 0;

kref_init(&request->ref);
request->ring = ring;
+ request->ringbuf = ring->buffer;
request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
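Taken together: under execlists each context owns one ringbuffer per engine, hence ctx->engine[ring->id].ringbuf above, while legacy submission has a single ringbuffer per engine, hence ring->buffer. Either way the pointer is fixed at request-creation time, which is what lets the retire loop drop its enable_execlists branch. A hedged sketch of the resulting invariant (hypothetical helper, not part of the patch):

/* Works identically for legacy and execlists requests, because the
 * creation paths above cached the correct ringbuffer in the request. */
static void record_retired_position(struct drm_i915_gem_request *request)
{
	request->ringbuf->last_retired_head = request->postfix;
}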