drm/i915: Late request cancellations are harmful
author		Chris Wilson <chris@chris-wilson.co.uk>
		Wed, 13 Apr 2016 16:35:15 +0000 (17:35 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
		Thu, 14 Apr 2016 09:45:40 +0000 (10:45 +0100)
Conceptually, each request is a record of a hardware transaction - we
build up a list of pending commands and then either commit them to
hardware or cancel them. However, whilst building up the list of
pending commands, we may modify state outside of the request and make
references to the pending request. If we do so and then cancel that
request, the external objects are left pointing to the deleted request,
leading to both graphical and memory corruption.

The easiest example is to consider object/VMA tracking. When we mark an
object as active in a request, we store a pointer to it, the most
recent request, in the object. When we later want to free that object,
we wait for that most recent request to be idle before proceeding
(otherwise the hardware will write to pages now owned by the system, or
we will attempt to read from those pages before the hardware has
finished writing). If the request was cancelled instead, that wait
completes immediately. As a result, all requests must be committed, not
cancelled, if the external state is unknown.
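
As a rough sketch of the hazard (the structures and helper names below
are illustrative only, not the driver's real API):

    /*
     * Illustrative sketch only: these are not the driver's real
     * structures or function names.
     */
    struct object {
            struct drm_i915_gem_request *last_request;
    };

    static void mark_active(struct object *obj,
                            struct drm_i915_gem_request *req)
    {
            /* External state now references the pending request. */
            obj->last_request = req;
    }

    static void free_object(struct object *obj)
    {
            /*
             * Wait for the GPU to finish with the object's pages. If
             * last_request was cancelled rather than committed, this
             * wait returns immediately and we hand the pages back to
             * the system while the hardware may still be writing them.
             */
            wait_for_request_idle(obj->last_request);
            release_pages(obj);
    }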

The only remaining users of i915_gem_request_cancel() are a couple of
extremely unlikely allocation-failure paths, so remove the API entirely.
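
The replacement pattern, repeated throughout the diff below, is to
commit the request even when a later step fails, and only then
propagate the error - as in the i915_gpu_idle() hunk:

    ret = i915_switch_context(req);
    /* Always commit the request; never cancel it. */
    i915_add_request_no_flush(req);
    if (ret)
            return ret;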

A consequence of committing all incomplete requests is that we generate
excess breadcrumbs and fill the ring much more often with dummy work. We
have completely undone the outstanding_last_seqno optimisation.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93907
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-16-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_overlay.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0faca3a29868f60a5c2b253baf8eae47910a1290..b9ed1b305bebe29cf2df8e047be1c1955a2be689 100644
@@ -2331,7 +2331,6 @@ struct drm_i915_gem_request {
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file);
@@ -2888,7 +2887,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                        struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                                   struct drm_i915_gem_execbuffer2 *args,
                                   struct list_head *vmas);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 21fb41c93caa606cd2628afa8e56898b2a842608..0bafe7d2cc4e3b6df2a64e67cfb910a0fee018a3 100644
@@ -2791,7 +2791,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
                 * fully prepared. Thus it can be cleaned up using the proper
                 * free code.
                 */
-               i915_gem_request_cancel(req);
+               intel_ring_reserved_space_cancel(req->ringbuf);
+               i915_gem_request_unreference(req);
                return ret;
        }
 
@@ -2828,13 +2829,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        return err ? ERR_PTR(err) : req;
 }
 
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-       intel_ring_reserved_space_cancel(req->ringbuf);
-
-       i915_gem_request_unreference(req);
-}
-
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
@@ -3444,12 +3438,9 @@ int i915_gpu_idle(struct drm_device *dev)
                                return PTR_ERR(req);
 
                        ret = i915_switch_context(req);
-                       if (ret) {
-                               i915_gem_request_cancel(req);
-                               return ret;
-                       }
-
                        i915_add_request_no_flush(req);
+                       if (ret)
+                               return ret;
                }
 
                ret = intel_engine_idle(engine);
@@ -4949,34 +4940,33 @@ i915_gem_init_hw(struct drm_device *dev)
                req = i915_gem_request_alloc(engine, NULL);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
-                       i915_gem_cleanup_engines(dev);
-                       goto out;
+                       break;
                }
 
                if (engine->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++)
-                               i915_gem_l3_remap(req, j);
+                       for (j = 0; j < NUM_L3_SLICES(dev); j++) {
+                               ret = i915_gem_l3_remap(req, j);
+                               if (ret)
+                                       goto err_request;
+                       }
                }
 
                ret = i915_ppgtt_init_ring(req);
-               if (ret && ret != -EIO) {
-                       DRM_ERROR("PPGTT enable %s failed %d\n",
-                                 engine->name, ret);
-                       i915_gem_request_cancel(req);
-                       i915_gem_cleanup_engines(dev);
-                       goto out;
-               }
+               if (ret)
+                       goto err_request;
 
                ret = i915_gem_context_enable(req);
-               if (ret && ret != -EIO) {
-                       DRM_ERROR("Context enable %s failed %d\n",
+               if (ret)
+                       goto err_request;
+
+err_request:
+               i915_add_request_no_flush(req);
+               if (ret) {
+                       DRM_ERROR("Failed to enable %s, error=%d\n",
                                  engine->name, ret);
-                       i915_gem_request_cancel(req);
                        i915_gem_cleanup_engines(dev);
-                       goto out;
+                       break;
                }
-
-               i915_add_request_no_flush(req);
        }
 
 out:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6ee4f00f620c0b45caf515720578ad4045ee5f33..6f4f2a6cdf93640e8bdf05d487f354aa0174c676 100644
@@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
        }
 }
 
-void
+static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
        /* Unconditionally force add_request to emit a full flush. */
@@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, params->request);
-       i915_gem_execbuffer_retire_commands(params);
 
        return 0;
 }
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        ret = i915_gem_request_add_to_client(req, file);
        if (ret)
-               goto err_batch_unpin;
+               goto err_request;
 
        /*
         * Save assorted stuff away to pass through to *_submission().
@@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        params->request                 = req;
 
        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+       i915_gem_execbuffer_retire_commands(params);
 
 err_batch_unpin:
        /*
@@ -1657,14 +1658,6 @@ err:
        i915_gem_context_unreference(ctx);
        eb_destroy(eb);
 
-       /*
-        * If the request was created but not successfully submitted then it
-        * must be freed again. If it was submitted then it is being tracked
-        * on the active request list and no clean up is required here.
-        */
-       if (ret && !IS_ERR_OR_NULL(req))
-               i915_gem_request_cancel(req);
-
        mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b1b457864e17c92acd84bfbef0a51065db600e7e..3cae596d10a3ee8efaa69e148e92e1a256e4adc8 100644
@@ -11664,7 +11664,7 @@ cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
        if (!IS_ERR_OR_NULL(request))
-               i915_gem_request_cancel(request);
+               i915_add_request_no_flush(request);
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
 cleanup:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c5dba687f288341977e85bf90577d7a5f45b75e2..7693efa3f15c30bc6bdef757a4d43e25b9959e77 100644
@@ -1007,7 +1007,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, params->request);
-       i915_gem_execbuffer_retire_commands(params);
 
        return 0;
 }
@@ -2700,13 +2699,12 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                }
 
                ret = engine->init_context(req);
+               i915_add_request_no_flush(req);
                if (ret) {
                        DRM_ERROR("ring init context: %d\n",
                                ret);
-                       i915_gem_request_cancel(req);
                        goto error_ringbuf;
                }
-               i915_add_request_no_flush(req);
        }
        return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6694e9230cd5bea860740bf1d5934d0a8db1ede1..bcc3b6a016d8ac8018a02732aa3f9b47d95a3820 100644
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
        ret = intel_ring_begin(req, 4);
        if (ret) {
-               i915_gem_request_cancel(req);
+               i915_add_request_no_flush(req);
                return ret;
        }
 
@@ -290,7 +290,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
        ret = intel_ring_begin(req, 2);
        if (ret) {
-               i915_gem_request_cancel(req);
+               i915_add_request_no_flush(req);
                return ret;
        }
 
@@ -356,7 +356,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
        ret = intel_ring_begin(req, 6);
        if (ret) {
-               i915_gem_request_cancel(req);
+               i915_add_request_no_flush(req);
                return ret;
        }
 
@@ -431,7 +431,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
                ret = intel_ring_begin(req, 2);
                if (ret) {
-                       i915_gem_request_cancel(req);
+                       i915_add_request_no_flush(req);
                        return ret;
                }