 	i915_gem_object_put(obj);
 }
 
+static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, dev_priv, id)
+		GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
+}
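The helper walks every engine and insists that its last_context is the kernel context; since GEM_BUG_ON() compiles away without CONFIG_DRM_I915_DEBUG_GEM, the check costs nothing on production builds. Purely for illustration, the same invariant can be phrased as a predicate so a caller could WARN rather than BUG; the helper below is hypothetical and not part of the patch:

/*
 * Hypothetical sketch (not in the patch): the invariant checked by
 * assert_kernel_context_is_current(), expressed as a bool so callers
 * could use WARN_ON() instead of the BUG_ON() behind GEM_BUG_ON().
 */
static bool kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		if (engine->last_context != dev_priv->kernel_context)
			return false;

	return true;
}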
+
 int i915_gem_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_gem_retire_requests(dev_priv);
+	assert_kernel_context_is_current(dev_priv);
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
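Most of i915_gem_suspend() is elided from the hunk above. The new assertion only holds because, earlier in that (elided) body, every engine has been switched to the kernel context and the GPU drained before requests are retired. A minimal sketch of that ordering follows; i915_gem_wait_for_idle() and its wait flags are assumptions about the elided code, named here only for illustration:

/*
 * Hedged sketch, not the real i915_gem_suspend(): the ordering the
 * assertion above depends on.  Switch to the kernel context, wait for
 * the GPU to idle, retire, and only then assert and mark the user
 * contexts lost.
 */
static int example_suspend_ordering(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto out;

	ret = i915_gem_wait_for_idle(dev_priv,		/* assumed helper and flags */
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		goto out;

	i915_gem_retire_requests(dev_priv);

	assert_kernel_context_is_current(dev_priv);
	i915_gem_context_lost(dev_priv);
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}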
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	struct i915_gem_timeline *timeline;
 	enum intel_engine_id id;
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
 	for_each_engine(engine, dev_priv, id) {
 		struct drm_i915_gem_request *req;
 		int ret;
-		if (engine->last_context == NULL)
-			continue;
-
-		if (engine->last_context == dev_priv->kernel_context)
-			continue;
-
 		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
+		/* Queue this switch after all other activity */
+		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+			struct drm_i915_gem_request *prev;
+			struct intel_timeline *tl;
+
+			tl = &timeline->engine[engine->id];
+			prev = i915_gem_active_raw(&tl->last_request,
+						   &dev_priv->drm.struct_mutex);
+			if (prev)
+				i915_sw_fence_await_sw_fence_gfp(&req->submit,
+								 &prev->submit,
+								 GFP_KERNEL);
+		}
+
 		ret = i915_switch_context(req);
 		i915_add_request_no_flush(req);
 		if (ret)
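The new loop orders the kernel-context switch after everything else on the engine: for each i915_gem_timeline on dev_priv->gt.timelines it looks up that engine's intel_timeline, takes its last_request, and makes the new request's submit fence wait on that request's submit fence. Because the return value of i915_sw_fence_await_sw_fence_gfp() is ignored, an allocation failure simply drops the extra ordering rather than failing the switch. Below is a hedged sketch of the same pattern factored into a standalone helper; the helper name and the error propagation are assumptions, not part of the patch:

/*
 * Hypothetical helper (illustration only): make @req's submission wait
 * on the last request of every timeline for @req->engine, mirroring
 * the loop added above but propagating await errors to the caller.
 */
static int example_await_all_timelines(struct drm_i915_gem_request *req)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct i915_gem_timeline *timeline;
	int err;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
		struct intel_timeline *tl = &timeline->engine[req->engine->id];
		struct drm_i915_gem_request *prev;

		prev = i915_gem_active_raw(&tl->last_request,
					   &dev_priv->drm.struct_mutex);
		if (!prev)
			continue;

		err = i915_sw_fence_await_sw_fence_gfp(&req->submit,
						       &prev->submit,
						       GFP_KERNEL);
		if (err < 0)
			return err;
	}

	return 0;
}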