request->batch_obj is only set by execbuffer, for the convenience of
debugging hangs. By moving that assignment to its single callsite, we
simplify all other callers and future patches. We also move the
complications of reference handling for request->batch_obj next to
where the active tracking is set up for the request.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470832906-13972-2-git-send-email-chris@chris-wilson.co.uk
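For illustration, a minimal sketch of how the calling convention
changes (the caller context is abridged; only the names and signatures
come from this patch):

    /* Before: every caller threaded a batch object through, passing
     * NULL when it had none.
     */
    __i915_add_request(req, NULL, flush);

    /* After: only execbuffer records the batch object, directly on
     * the request, and the common entry point loses the parameter.
     */
    params->request->batch_obj = params->batch->obj;
    __i915_add_request(req, flush);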
goto err_batch_unpin;
}
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+ * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+ params->request->batch_obj = params->batch->obj;
+
ret = i915_gem_request_add_to_client(params->request, file);
if (ret)
goto err_request;
ret = execbuf_submit(params, args, &eb->vmas);
err_request:
- __i915_add_request(params->request, params->batch->obj, ret == 0);
+ __i915_add_request(params->request, ret == 0);
err_batch_unpin:
/*
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
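As a sketch of the lifecycle the new comment relies on (the helper
names below are illustrative, not the driver's exact internals):

    /* While the request is in flight, the batch object sits on the
     * active list, and that active tracking holds the reference:
     */
    submit_request(request);      /* batch_obj -> active_list */

    /* Retiring the request moves the object to the inactive list and
     * drops that reference; hence execbuffer takes no extra
     * reference of its own:
     */
    retire_request(request);      /* batch_obj -> inactive_list */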
-void __i915_add_request(struct drm_i915_gem_request *request,
- struct drm_i915_gem_object *obj,
- bool flush_caches)
+void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
struct intel_engine_cs *engine;
struct intel_ring *ring;
request->head = request_start;
- /* Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- request->batch_obj = obj;
-
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
* hangcheck. Hence we apply the barrier to ensure that we do not
*pdst = src;
}
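The sealing comment above refers to the usual lockless
publish/observe pairing; a generic sketch of that pattern (the field
and helper names here are illustrative, not the driver's actual
code):

    /* Writer, at submission: publish the request's fields before
     * marking it as pending execution.
     */
    req->head = request_start;
    smp_wmb();
    WRITE_ONCE(req->pending, true);

    /* Reader, in hangcheck, without holding any locks: */
    if (READ_ONCE(req->pending)) {
        smp_rmb();                /* pairs with the writer's smp_wmb() */
        inspect(req->head);
    }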
-void __i915_add_request(struct drm_i915_gem_request *req,
- struct drm_i915_gem_object *batch_obj,
- bool flush_caches);
+void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
- __i915_add_request(req, NULL, true)
+ __i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
- __i915_add_request(req, NULL, false)
+ __i915_add_request(req, false)
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
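With the updated macros, a caller that has finished emitting commands
simply does (hypothetical usage; the macros are defined in the hunk
above):

    i915_add_request(req);           /* __i915_add_request(req, true)  */

    /* or, when cache flushing is not wanted: */
    i915_add_request_no_flush(req);  /* __i915_add_request(req, false) */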