req->ctx = ctx;
i915_gem_context_reference(req->ctx);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
- if (ret) {
- i915_gem_context_unreference(req->ctx);
- goto err;
- }
-
/*
 * Reserve space in the ring buffer for all the commands required to
 * eventually emit this request. This is to guarantee that the
 * i915_add_request() call can't fail. Note that the reserve may need
 * to be redone if the request is not actually submitted straight
 * away, e.g. because a GPU scheduler has deferred it.
 */
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- ret = intel_ring_begin(req, 0);
- if (ret) {
- /*
- * At this point, the request is fully allocated even if not
- * fully prepared. Thus it can be cleaned up using the proper
- * free code, along with any reserved space.
- */
- i915_gem_request_unreference(req);
- return ret;
- }
+
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_alloc_request_extras(req);
+ else
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err_ctx;
*req_out = req;
return 0;
+err_ctx:
+ i915_gem_context_unreference(ctx);
err:
kmem_cache_free(dev_priv->requests, req);
return ret;
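
With the hunk above applied, the backend-specific *_alloc_request_extras() call is both the last step that can fail and the point where the ring space is reserved, so the common path unwinds with plain labels instead of unreferencing a half-built request. For reference, the enclosing allocator (named __i915_gem_request_alloc() here, going by the err labels and the dev_priv->requests cache; treat the signature below as an approximation) ends up looking roughly like this, with the request allocation and seqno setup that sit above this hunk only summarised in a comment:

static int __i915_gem_request_alloc(struct intel_engine_cs *engine,
				    struct intel_context *ctx,
				    struct drm_i915_gem_request **req_out)
{
	struct drm_i915_gem_request *req;
	int ret;

	/* ... dev_priv lookup, allocation of req from dev_priv->requests and
	 * the seqno assignment are elided here; failures there jump to the
	 * err label below ... */

	req->ctx = ctx;
	i915_gem_context_reference(req->ctx);

	/*
	 * The ring-space reservation is claimed inside the backend hook, so
	 * a failure here only has to drop the context reference and free the
	 * request itself.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	*req_out = req;
	return 0;

err_ctx:
	i915_gem_context_unreference(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ret;
}
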
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret = 0;
+ int ret;
request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
- if (request->ctx != request->i915->kernel_context)
+ if (request->ctx != request->i915->kernel_context) {
ret = intel_lr_context_pin(request->ctx, request->engine);
+ if (ret)
+ return ret;
+ }
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ goto err_unpin;
+
+ return 0;
+
+err_unpin:
+ if (request->ctx != request->i915->kernel_context)
+ intel_lr_context_unpin(request->ctx, request->engine);
return ret;
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
request->ringbuf = request->engine->buffer;
- return 0;
+ return intel_ring_begin(request, 0);
}
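
Both backends now leave their extras hook with req->reserved_space already claimed through intel_ring_begin(request, 0), so a successfully allocated request carries the guarantee from the comment above: the eventual i915_add_request() has its ring space set aside up front. An illustrative caller, as a sketch only (i915_gem_request_alloc() and i915_add_request() are assumed from the surrounding driver and are not part of this diff):

struct drm_i915_gem_request *req;

req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req))
	return PTR_ERR(req);

/* ... emit the commands for this request ... */

/* The space needed to close out the request was reserved at allocation
 * time, so this step is not expected to run out of ring space. */
i915_add_request(req);
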
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)