	kref_put(&req->ref, i915_gem_request_free);
}
+static inline void
+i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
+{
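+	/*
+	 * Fast path: drop the reference without taking struct_mutex.
+	 * atomic_add_unless() fails only when this would be the final
+	 * unreference, in which case the last put and the free are
+	 * performed with struct_mutex held.
+	 */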
+	if (req && !atomic_add_unless(&req->ref.refcount, -1, 1)) {
+		struct drm_device *dev = req->ring->dev;
+
+		mutex_lock(&dev->struct_mutex);
+		if (likely(atomic_dec_and_test(&req->ref.refcount)))
+			i915_gem_request_free(&req->ref);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	ret = __i915_wait_request(req, reset_counter, true,
				  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
				  file->driver_priv);
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_request_unreference(req);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_request_unreference__unlocked(req);
	return ret;
out:
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_request_unreference(target);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_request_unreference__unlocked(target);
	return ret;
}