drm/i915: Ensure requests stick around during waits
Author:     John Harrison <John.C.Harrison@Intel.com>
AuthorDate: Mon, 24 Nov 2014 18:49:28 +0000
Commit:     Daniel Vetter <daniel.vetter@ffwll.ch>
CommitDate: Wed, 3 Dec 2014 08:35:15 +0000
Added reference counting of the request structure around __i915_wait_seqno()
calls. This is a precursor to updating the wait code itself to take the request
rather than a seqno. At that point, it would be a Bad Idea for a request object
to be retired and freed while the wait code is still using it.
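
For context, the i915_gem_request_reference()/unreference() helpers used in
the hunks below come from the reference counting added to the request
structure earlier in this series. A minimal sketch of what they are assumed
to look like (kref-based, with the free path requiring struct_mutex):

    /* Sketch only; assumed from the earlier request-refcounting patch in
     * this series, not part of this diff. */
    static inline void
    i915_gem_request_reference(struct drm_i915_gem_request *req)
    {
            kref_get(&req->ref);
    }

    static inline void
    i915_gem_request_unreference(struct drm_i915_gem_request *req)
    {
            /* Freeing a request touches GEM state, so dropping the last
             * reference is only legal under struct_mutex; this is why the
             * hunks below re-take the mutex just to unreference. */
            WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
            kref_put(&req->ref, i915_gem_request_free);
    }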

v3:

Note that even though struct_mutex is held during a call to i915_wait_seqno(),
it is still necessary to explicitly bump the request's reference count. It
appears that the shrinker can retire requests asynchronously even while the
mutex is held.
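
Concretely, the hazard being guarded against looks like this (illustrative
sketch, not a hunk from this patch):

    /* Illustrative only: even with struct_mutex held, reclaim entered
     * from an allocation can run the i915 shrinker, which may retire
     * requests and drop the active list's reference to them. Pinning
     * the request first keeps it valid until the wait returns. */
    i915_gem_request_reference(req);    /* pin across the wait */
    ret = i915_wait_seqno(ring, seqno); /* struct_mutex held throughout */
    i915_gem_request_unreference(req);  /* safe: mutex still held */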

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
[danvet: Remove wrongly squashed hunk which breaks the build.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c

index bf0135815a9604a9e33649cf3d1235d4e4c43aec..8ec07853b35c5f8a2c753087b9d25ea57c88df09 100644
@@ -1417,10 +1417,12 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                return ret;
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
        ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
                                file_priv);
        mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(req);
        if (ret)
                return ret;
 
@@ -2920,6 +2922,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_request *req;
        struct intel_engine_cs *ring = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
@@ -2946,7 +2949,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (!obj->active || !obj->last_read_req)
                goto out;
 
-       seqno = i915_gem_request_get_seqno(obj->last_read_req);
+       req = obj->last_read_req;
+       seqno = i915_gem_request_get_seqno(req);
        WARN_ON(seqno == 0);
        ring = obj->ring;
 
@@ -2960,10 +2964,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
        drm_gem_object_unreference(&obj->base);
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
 
-       return __i915_wait_seqno(ring, seqno, reset_counter, true,
-                                &args->timeout_ns, file->driver_priv);
+       ret = __i915_wait_seqno(ring, seqno, reset_counter, true,
+                               &args->timeout_ns, file->driver_priv);
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(req);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -4118,6 +4127,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       if (target)
+               i915_gem_request_reference(target);
        spin_unlock(&file_priv->mm.lock);
 
        if (target == NULL)
@@ -4129,6 +4140,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(target);
+       mutex_unlock(&dev->struct_mutex);
+
        return ret;
 }