struct intel_context *ctx;
struct intel_ringbuffer *ringbuf;
- /** Batch buffer related to this request if any */
+ /** Batch buffer related to this request if any (used for
+ error state dump only) */
struct drm_i915_gem_object *batch_obj;
/** Time at which this request was emitted, in jiffies. */
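
The reworded comment above documents that batch_obj is only consumed by the GPU error-state capture code, never by the normal submission path. A non-compilable sketch of that consumer follows; the capture_batch() helper, the error_ring type and snapshot_object() are invented for illustration and are not part of this patch:

/* Illustrative only: the sole reader of req->batch_obj is error-state
 * capture, which snapshots the batch for a post-mortem dump. */
static void capture_batch(struct error_ring *ering,
			  struct drm_i915_gem_request *req)
{
	if (!req || !req->batch_obj)
		return;

	/* Snapshot the batch buffer contents for the error dump;
	 * the submission path never looks at batch_obj again. */
	ering->batchbuffer = snapshot_object(req->batch_obj);
}
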
/* Now it is safe to go back round and do everything else: */
for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_request *req;
+
WARN_ON(!ring->default_context);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+ if (ret) {
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
+
if (ring->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++)
				i915_gem_l3_remap(ring, j);
		}

		ret = i915_ppgtt_init_ring(ring);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_cancel(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
ret = i915_gem_context_enable(ring);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_cancel(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
+
+ i915_add_request_no_flush(ring);
}
out:
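
The request handling added to this loop follows one pattern: every successful i915_gem_request_alloc() is balanced either by i915_gem_request_cancel() on a failure path or by i915_add_request_no_flush() once the per-ring setup has succeeded. A stripped-down, non-compilable sketch of that pattern, using only the calls visible in this hunk (do_ring_setup() is a placeholder for the L3 remap / PPGTT / context-enable steps, not a real i915 function):

/* Illustrative only: request lifecycle as used in the loop above. */
static int init_one_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = do_ring_setup(ring);	/* placeholder for the real steps */
	if (ret) {
		/* Failure: the outstanding request must be cancelled,
		 * not leaked. */
		i915_gem_request_cancel(req);
		return ret;
	}

	/* Success: submit the request without an explicit cache flush. */
	i915_add_request_no_flush(ring);
	return 0;
}
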
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- __i915_add_request(ring, NULL, NULL, true);
- /* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
return ret;
{
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
ret = i915_gem_render_state_prepare(ring, &so);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- __i915_add_request(ring, file, NULL, true);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
out:
i915_gem_render_state_fini(&so);
return ret;
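
Both render-state hunks make the same change: the __i915_add_request() call (and, in the execlists path, the file_priv/file locals that existed only to feed it) is dropped, so the function now just prepares and emits the golden render state while the request it runs under is owned, and later added, by the caller shown in the i915_gem_init_hw() hunk above. A hedged, non-compilable sketch of the resulting shape (render_state_emit_only() is an invented name and the batch dispatch step is elided, as it is in this excerpt):

/* Illustrative only: what remains of the render state path after this
 * patch -- prepare, mark so.obj active on the ring, clean up, but no
 * request submission of its own. */
static int render_state_emit_only(struct intel_engine_cs *ring)
{
	struct render_state so;
	int ret;

	ret = i915_gem_render_state_prepare(ring, &so);
	if (ret)
		return ret;

	/* ... batch dispatch elided, as in the excerpt above ... */

	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);

	/* No __i915_add_request() here any more: the caller that
	 * allocated the request decides when it is added. */

	i915_gem_render_state_fini(&so);
	return ret;
}
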