drm/i915: Reorganise legacy context switch to cope with late failure
author     Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 13 Apr 2016 16:35:14 +0000 (17:35 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Thu, 14 Apr 2016 09:45:40 +0000 (10:45 +0100)
After mi_set_context() succeeds, we need to update the engine's
last_context. This ensures that we hold a pin on the context whilst the
hardware may write to it. However, if we then fail to complete the
post-switch setup of the context, we need to force the subsequent use of
the same context to complete that setup (which means updating
skip_rcs_switch()), as sketched below.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-15-git-send-email-chris@chris-wilson.co.uk
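
For orientation, here is a condensed sketch of do_rcs_switch() as it reads
with this patch applied, assembled from the hunks below. The pin, CPU-cache
flush and unpin calls are elided (they are unchanged and only their error
paths appear in the diff), so treat this as an outline rather than
compilable code. The point of the reordering is that everything which can
fail before mi_set_context() unwinds through unpin_out, while everything
after engine->last_context has been updated simply returns the error and is
picked up again on the next use of the context.

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
        struct intel_context *to = req->ctx;
        struct intel_engine_cs *engine = req->engine;
        struct intel_context *from;
        u32 hw_flags;
        int ret, i;

        if (skip_rcs_switch(engine, to))
                return 0;

        /* Trying to pin first makes error handling easier. */
        /* ... pin to->legacy_hw_ctx.rcs_state into the GGTT (unchanged) ... */

        /* Pinning may itself have switched to the default context, so only
         * sample the outgoing context once the pin is in place. */
        from = engine->last_context;

        /* ... flush the context object out of the CPU caches (unchanged) ... */

        if (needs_pd_load_pre(engine, to)) {
                trace_switch_mm(engine, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, req);
                if (ret)
                        goto unpin_out;
        }

        if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
                hw_flags = MI_RESTORE_INHIBIT;
        else if (to->ppgtt &&
                 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
                hw_flags = MI_FORCE_RESTORE;
        else
                hw_flags = 0;

        if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
                ret = mi_set_context(req, hw_flags);
                if (ret)
                        goto unpin_out; /* nothing committed yet, safe to unwind */
        }

        /* ... move 'from' to the active list and drop its pin/reference
         * (unchanged) ... */
        i915_gem_context_reference(to);
        engine->last_context = to;      /* the hardware now owns 'to' */

        /* Errors past this point are returned without unwinding; the
         * remaining bookkeeping is only updated as each step succeeds, so
         * the next use of this context finishes the setup. */
        if (needs_pd_load_post(to, hw_flags)) {
                trace_switch_mm(engine, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, req);
                if (ret)
                        return ret;
        }

        if (to->ppgtt)
                to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

        for (i = 0; i < MAX_L3_SLICES; i++) {
                if (!(to->remap_slice & (1 << i)))
                        continue;

                ret = i915_gem_l3_remap(req, i);
                if (ret)
                        return ret;

                to->remap_slice &= ~(1 << i);
        }

        if (!to->legacy_hw_ctx.initialized) {
                if (engine->init_context) {
                        ret = engine->init_context(req);
                        if (ret)
                                return ret;
                }
                to->legacy_hw_ctx.initialized = true;
        }

        return 0;

unpin_out:
        /* ... unpin to->legacy_hw_ctx.rcs_state (unchanged) ... */
        return ret;
}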
drivers/gpu/drm/i915/i915_gem_context.c

index c027240aacf3a0fea70bbdd7b098eeee6b483ce0..e5acc3916f75fa4dda6024a4f9c97c504d452349 100644
@@ -610,17 +610,19 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 }
 
 static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
-                                  struct intel_context *from,
                                   struct intel_context *to)
 {
        if (to->remap_slice)
                return false;
 
-       if (to->ppgtt && from == to &&
+       if (!to->legacy_hw_ctx.initialized)
+               return false;
+
+       if (to->ppgtt &&
            !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
-               return true;
+               return false;
 
-       return false;
+       return to == engine->last_context;
 }
 
 static bool
@@ -643,18 +645,12 @@ needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
-                  u32 hw_flags)
+needs_pd_load_post(struct intel_context *to, u32 hw_flags)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-
        if (!to->ppgtt)
                return false;
 
-       if (!IS_GEN8(engine->dev))
-               return false;
-
-       if (engine != &dev_priv->engine[RCS])
+       if (!IS_GEN8(to->i915))
                return false;
 
        if (hw_flags & MI_RESTORE_INHIBIT)
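
For reference, needs_pd_load_post() as it reads after this hunk: the engine
parameter and the RCS check are gone, presumably because the only caller,
do_rcs_switch(), already runs on the render engine. The two trailing return
statements sit outside the hunk and are assumed unchanged here.

static bool
needs_pd_load_post(struct intel_context *to, u32 hw_flags)
{
        if (!to->ppgtt)
                return false;

        if (!IS_GEN8(to->i915))
                return false;

        /* Restore was inhibited, so the PDPs carried in the saved context
         * image were not reloaded and must be loaded explicitly. */
        if (hw_flags & MI_RESTORE_INHIBIT)
                return true;

        return false;
}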
@@ -667,12 +663,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
        struct intel_context *to = req->ctx;
        struct intel_engine_cs *engine = req->engine;
-       struct intel_context *from = engine->last_context;
-       u32 hw_flags = 0;
-       bool uninitialized = false;
+       struct intel_context *from;
+       u32 hw_flags;
        int ret, i;
 
-       if (skip_rcs_switch(engine, from, to))
+       if (skip_rcs_switch(engine, to))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
@@ -686,23 +681,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
         * Pin can switch back to the default context if we end up calling into
         * evict_everything - as a last ditch gtt defrag effort that also
         * switches to the default context. Hence we need to reload from here.
+        *
+        * XXX: Doing so is painfully broken!
         */
        from = engine->last_context;
 
-       if (needs_pd_load_pre(engine, to)) {
-               /* Older GENs and non render rings still want the load first,
-                * "PP_DCLV followed by PP_DIR_BASE register through Load
-                * Register Immediate commands in Ring Buffer before submitting
-                * a context."*/
-               trace_switch_mm(engine, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
-               if (ret)
-                       goto unpin_out;
-
-               /* Doing a PD load always reloads the page dirs */
-               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-       }
-
        /*
         * Clear this page out of any CPU caches for coherent swap-in/out. Note
         * that thanks to write = false in this call and us not setting any gpu
@@ -715,53 +698,37 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
        if (ret)
                goto unpin_out;
 
-       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
-               hw_flags |= MI_RESTORE_INHIBIT;
+       if (needs_pd_load_pre(engine, to)) {
+               /* Older GENs and non render rings still want the load first,
+                * "PP_DCLV followed by PP_DIR_BASE register through Load
+                * Register Immediate commands in Ring Buffer before submitting
+                * a context."*/
+               trace_switch_mm(engine, to);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
+               if (ret)
+                       goto unpin_out;
+       }
+
+       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
-       } else if (to->ppgtt &&
-                  (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
-               hw_flags |= MI_FORCE_RESTORE;
-               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-       }
+               hw_flags = MI_RESTORE_INHIBIT;
+       else if (to->ppgtt &&
+                intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+               hw_flags = MI_FORCE_RESTORE;
+       else
+               hw_flags = 0;
 
        /* We should never emit switch_mm more than once */
        WARN_ON(needs_pd_load_pre(engine, to) &&
-               needs_pd_load_post(engine, to, hw_flags));
-
-       ret = mi_set_context(req, hw_flags);
-       if (ret)
-               goto unpin_out;
+               needs_pd_load_post(to, hw_flags));
 
-       /* GEN8 does *not* require an explicit reload if the PDPs have been
-        * setup, and we do not wish to move them.
-        */
-       if (needs_pd_load_post(engine, to, hw_flags)) {
-               trace_switch_mm(engine, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
-               /* The hardware context switch is emitted, but we haven't
-                * actually changed the state - so it's probably safe to bail
-                * here. Still, let the user know something dangerous has
-                * happened.
-                */
-               if (ret) {
-                       DRM_ERROR("Failed to change address space on context switch\n");
-                       goto unpin_out;
-               }
-       }
-
-       for (i = 0; i < MAX_L3_SLICES; i++) {
-               if (!(to->remap_slice & (1<<i)))
-                       continue;
-
-               ret = i915_gem_l3_remap(req, i);
-               /* If it failed, try again next round */
+       if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+               ret = mi_set_context(req, hw_flags);
                if (ret)
-                       DRM_DEBUG_DRIVER("L3 remapping failed\n");
-               else
-                       to->remap_slice &= ~(1<<i);
+                       goto unpin_out;
        }
 
        /* The backing object for the context is done after switching to the
@@ -786,19 +753,45 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
                i915_gem_context_unreference(from);
        }
-
-       uninitialized = !to->legacy_hw_ctx.initialized;
-       to->legacy_hw_ctx.initialized = true;
-
        i915_gem_context_reference(to);
        engine->last_context = to;
 
-       if (uninitialized) {
+       /* GEN8 does *not* require an explicit reload if the PDPs have been
+        * setup, and we do not wish to move them.
+        */
+       if (needs_pd_load_post(to, hw_flags)) {
+               trace_switch_mm(engine, to);
+               ret = to->ppgtt->switch_mm(to->ppgtt, req);
+               /* The hardware context switch is emitted, but we haven't
+                * actually changed the state - so it's probably safe to bail
+                * here. Still, let the user know something dangerous has
+                * happened.
+                */
+               if (ret)
+                       return ret;
+       }
+
+       if (to->ppgtt)
+               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+
+       for (i = 0; i < MAX_L3_SLICES; i++) {
+               if (!(to->remap_slice & (1<<i)))
+                       continue;
+
+               ret = i915_gem_l3_remap(req, i);
+               if (ret)
+                       return ret;
+
+               to->remap_slice &= ~(1<<i);
+       }
+
+       if (!to->legacy_hw_ctx.initialized) {
                if (engine->init_context) {
                        ret = engine->init_context(req);
                        if (ret)
-                               DRM_ERROR("ring init context: %d\n", ret);
+                               return ret;
                }
+               to->legacy_hw_ctx.initialized = true;
        }
 
        return 0;
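
To make the retry mechanism concrete, here is a short worked example written
against the hunks above; the failing call is hypothetical and the fragment is
illustrative rather than compilable.

/*
 * Suppose engine->init_context(req) fails after mi_set_context() has
 * already been emitted. do_rcs_switch() returns the error, leaving:
 *
 *   engine->last_context == to;              the context image stays pinned
 *   to->legacy_hw_ctx.initialized == false;  only set once init succeeds
 *
 * On the next request that uses this context:
 */
if (skip_rcs_switch(engine, to))   /* false: legacy_hw_ctx.initialized is unset */
        return 0;

/*
 * do_rcs_switch() therefore runs again. Because the context is still
 * uninitialized, hw_flags is MI_RESTORE_INHIBIT; and since to == from
 * (== engine->last_context) and MI_FORCE_RESTORE is not set,
 * mi_set_context() is not re-emitted: the outstanding setup, here
 * engine->init_context(), is simply attempted again.
 */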