From: Chris Wilson
Date: Tue, 20 Jun 2017 11:05:47 +0000 (+0100)
Subject: drm/i915: Enable rcu-only context lookups
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=1acfc104cdf8a3408f0e83b4115d4419c6315005;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

drm/i915: Enable rcu-only context lookups

Whilst the contents of the context are still protected by the big
struct_mutex, this is not much of an improvement. It is just one tiny
step towards reducing our BKL.

Signed-off-by: Chris Wilson
Reviewed-by: Joonas Lahtinen
Link: http://patchwork.freedesktop.org/patch/msgid/20170620110547.15947-3-chris@chris-wilson.co.uk
---

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0f1330adc37b..69219b5d1198 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3533,16 +3533,22 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                          struct sg_table *pages);
 
+static inline struct i915_gem_context *
+__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
+{
+        return idr_find(&file_priv->context_idr, id);
+}
+
 static inline struct i915_gem_context *
 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
         struct i915_gem_context *ctx;
 
-        lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
-        ctx = idr_find(&file_priv->context_idr, id);
-        if (!ctx)
-                return ERR_PTR(-ENOENT);
+        rcu_read_lock();
+        ctx = __i915_gem_context_lookup_rcu(file_priv, id);
+        if (ctx && !kref_get_unless_zero(&ctx->ref))
+                ctx = NULL;
+        rcu_read_unlock();
 
         return ctx;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9cf96380af9f..71d2ea7dab64 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -187,7 +187,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
         list_del(&ctx->link);
 
         ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
-        kfree(ctx);
+        kfree_rcu(ctx, rcu);
 }
 
 static void contexts_free(struct drm_i915_private *i915)
@@ -1021,20 +1021,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
         if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
                 return -ENOENT;
 
-        ret = i915_mutex_lock_interruptible(dev);
-        if (ret)
-                return ret;
-
         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-        if (IS_ERR(ctx)) {
-                mutex_unlock(&dev->struct_mutex);
-                return PTR_ERR(ctx);
-        }
+        if (!ctx)
+                return -ENOENT;
+
+        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        if (ret)
+                goto out;
 
         __destroy_hw_context(ctx, file_priv);
         mutex_unlock(&dev->struct_mutex);
 
-        DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
+out:
+        i915_gem_context_put(ctx);
         return 0;
 }
 
@@ -1044,17 +1043,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_file_private *file_priv = file->driver_priv;
         struct drm_i915_gem_context_param *args = data;
         struct i915_gem_context *ctx;
-        int ret;
-
-        ret = i915_mutex_lock_interruptible(dev);
-        if (ret)
-                return ret;
+        int ret = 0;
 
         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-        if (IS_ERR(ctx)) {
-                mutex_unlock(&dev->struct_mutex);
-                return PTR_ERR(ctx);
-        }
+        if (!ctx)
+                return -ENOENT;
 
         args->size = 0;
         switch (args->param) {
@@ -1082,8 +1075,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                 ret = -EINVAL;
                 break;
         }
-        mutex_unlock(&dev->struct_mutex);
 
+        i915_gem_context_put(ctx);
         return ret;
 }
 
@@ -1095,15 +1088,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
         struct i915_gem_context *ctx;
         int ret;
 
+        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+        if (!ctx)
+                return -ENOENT;
+
         ret = i915_mutex_lock_interruptible(dev);
         if (ret)
-                return ret;
-
-        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-        if (IS_ERR(ctx)) {
-                mutex_unlock(&dev->struct_mutex);
-                return PTR_ERR(ctx);
-        }
+                goto out;
 
         switch (args->param) {
         case I915_CONTEXT_PARAM_BAN_PERIOD:
@@ -1141,6 +1132,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
         }
         mutex_unlock(&dev->struct_mutex);
 
+out:
+        i915_gem_context_put(ctx);
         return ret;
 }
 
@@ -1155,27 +1148,31 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
         if (args->flags || args->pad)
                 return -EINVAL;
 
-        ret = i915_mutex_lock_interruptible(dev);
-        if (ret)
-                return ret;
+        ret = -ENOENT;
+        rcu_read_lock();
+        ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
+        if (!ctx)
+                goto out;
 
-        ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
-        if (IS_ERR(ctx)) {
-                mutex_unlock(&dev->struct_mutex);
-                return PTR_ERR(ctx);
-        }
+        /*
+         * We opt for unserialised reads here. This may result in tearing
+         * in the extremely unlikely event of a GPU hang on this context
+         * as we are querying them. If we need that extra layer of protection,
+         * we should wrap the hangstats with a seqlock.
+         */
 
         if (capable(CAP_SYS_ADMIN))
                 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
         else
                 args->reset_count = 0;
 
-        args->batch_active = ctx->guilty_count;
-        args->batch_pending = ctx->active_count;
-
-        mutex_unlock(&dev->struct_mutex);
+        args->batch_active = READ_ONCE(ctx->guilty_count);
+        args->batch_pending = READ_ONCE(ctx->active_count);
 
-        return 0;
+        ret = 0;
+out:
+        rcu_read_unlock();
+        return ret;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 61146f4aa168..04320f80f9f4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -99,6 +99,11 @@ struct i915_gem_context {
          */
         struct kref ref;
 
+        /**
+         * @rcu: rcu_head for deferred freeing.
+         */
+        struct rcu_head rcu;
+
         /**
          * @flags: small set of booleans
          */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index eb46dfa374a7..35d1f8e8906e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -669,16 +669,17 @@ static int eb_select_context(struct i915_execbuffer *eb)
         struct i915_gem_context *ctx;
 
         ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
-        if (unlikely(IS_ERR(ctx)))
-                return PTR_ERR(ctx);
+        if (unlikely(!ctx))
+                return -ENOENT;
 
         if (unlikely(i915_gem_context_is_banned(ctx))) {
                 DRM_DEBUG("Context %u tried to submit while banned\n",
                           ctx->user_handle);
+                i915_gem_context_put(ctx);
                 return -EIO;
         }
 
-        eb->ctx = i915_gem_context_get(ctx);
+        eb->ctx = ctx;
         eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
 
         eb->context_flags = 0;
@@ -2127,7 +2128,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
                 args->flags |= __EXEC_HAS_RELOC;
         eb.exec = exec;
-        eb.ctx = NULL;
         eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
         if (USES_FULL_PPGTT(eb.i915))
                 eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -2182,6 +2182,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         if (eb_create(&eb))
                 return -ENOMEM;
 
+        err = eb_select_context(&eb);
+        if (unlikely(err))
+                goto err_destroy;
+
         /*
          * Take a local wakeref for preparing to dispatch the execbuf as
          * we expect to access the hardware fairly frequently in the
@@ -2190,14 +2194,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
          * 100ms.
          */
         intel_runtime_pm_get(eb.i915);
+
         err = i915_mutex_lock_interruptible(dev);
         if (err)
                 goto err_rpm;
 
-        err = eb_select_context(&eb);
-        if (unlikely(err))
-                goto err_unlock;
-
         err = eb_relocate(&eb);
         if (err)
                 /*
@@ -2333,11 +2334,11 @@ err_batch_unpin:
 err_vma:
         if (eb.exec)
                 eb_release_vmas(&eb);
-        i915_gem_context_put(eb.ctx);
-err_unlock:
         mutex_unlock(&dev->struct_mutex);
 err_rpm:
         intel_runtime_pm_put(eb.i915);
+        i915_gem_context_put(eb.ctx);
+err_destroy:
         eb_destroy(&eb);
         if (out_fence_fd != -1)
                 put_unused_fd(out_fence_fd);
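
The heart of the patch is the new lookup scheme in i915_gem_context_lookup(): the idr is walked
under rcu_read_lock() only, a reference is taken with kref_get_unless_zero(), and the free path
defers the actual kfree() with kfree_rcu() so that concurrent readers never touch freed memory.
A minimal, self-contained sketch of that pattern, using generic placeholder names rather than the
real i915 structures, looks like this:

/*
 * Illustrative sketch only, not part of the patch: a generic object that
 * can be looked up from an idr under RCU, mirroring what the patch does
 * for struct i915_gem_context.
 */
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        struct kref ref;
        struct rcu_head rcu;    /* for deferred freeing via kfree_rcu() */
};

static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, ref);

        /* Wait for an RCU grace period before the memory can be reused. */
        kfree_rcu(o, rcu);
}

/* Lock-free lookup: no struct_mutex, only the RCU read lock. */
static struct obj *obj_lookup(struct idr *idr, u32 id)
{
        struct obj *o;

        rcu_read_lock();
        o = idr_find(idr, id);
        /*
         * The object may be released concurrently; only keep it if its
         * refcount has not already dropped to zero.
         */
        if (o && !kref_get_unless_zero(&o->ref))
                o = NULL;
        rcu_read_unlock();

        return o;       /* caller drops the reference with kref_put(&o->ref, obj_release) */
}

The kref_get_unless_zero() check is what makes the unlocked idr_find() safe: an object whose last
reference has already been dropped can still be visible in the idr until the RCU grace period
expires, and must not be resurrected.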
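The new comment in i915_gem_context_reset_stats_ioctl() deliberately accepts unserialised
(possibly torn) reads of guilty_count and active_count, and names a seqlock as the stronger
alternative should it ever be needed. That alternative is not implemented by this patch; as a
rough, hypothetical sketch (structure and function names invented for illustration), wrapping the
hangstats in a seqlock could look like:

#include <linux/seqlock.h>

/* Hypothetical: hangstats guarded by a seqlock instead of READ_ONCE(). */
struct hangstats {
        seqlock_t lock;
        unsigned int guilty_count;
        unsigned int active_count;
};

static void hangstats_record(struct hangstats *hs, bool guilty)
{
        write_seqlock(&hs->lock);
        if (guilty)
                hs->guilty_count++;
        else
                hs->active_count++;
        write_sequnlock(&hs->lock);
}

static void hangstats_read(struct hangstats *hs,
                           unsigned int *guilty, unsigned int *active)
{
        unsigned int seq;

        /* Retry the read if a writer raced with us. */
        do {
                seq = read_seqbegin(&hs->lock);
                *guilty = hs->guilty_count;
                *active = hs->active_count;
        } while (read_seqretry(&hs->lock, seq));
}

The patch opts for the simpler READ_ONCE() reads because a hang racing with the query is extremely
unlikely and a torn count is harmless for this debug interface.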