From 89ea20b930cb7372095645acae8928a75f7d5be5 Mon Sep 17 00:00:00 2001
From: Ping Gao
Date: Thu, 29 Jun 2017 12:22:42 +0800
Subject: [PATCH] drm/i915/gvt: Factor out scan and shadow from workload
 dispatch

To perform the workload scan and shadow in the ELSP writing stage for
performance reasons, the workload scan and shadow logic should be
factored out of dispatch_workload().

v2: Put context pin before i915_add_request; refine the comments;
    rename some APIs.
v3: workload->status should be set only when an error happens.
v4: i915_add_request must follow i915_gem_request_alloc.

Signed-off-by: Ping Gao
Reviewed-by: Zhi Wang
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c |  2 +-
 drivers/gpu/drm/i915/gvt/cmd_parser.h |  2 +-
 drivers/gpu/drm/i915/gvt/gvt.h        |  2 +
 drivers/gpu/drm/i915/gvt/scheduler.c  | 83 ++++++++++++++++-----------
 4 files changed, 54 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 713848c36349..7ed8c551a5a1 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2647,7 +2647,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
 {
 	int ret;
 	struct intel_vgpu *vgpu = workload->vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index bed33514103c..286703643002 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
 
 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
 
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
 
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..ba53ad17900b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -470,6 +470,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
 struct intel_gvt_ops {
 	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
 		unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0e2e36ad6196..7929c0285d1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -184,42 +184,27 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+/**
+ * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
+ * shadowing it, including the ring buffer, wa_ctx and ctx.
+ * @workload: an abstract entity for each execlist submission.
+ *
+ * This function is called before the workload is submitted to i915, to make
+ * sure the content of the workload is valid.
+ */
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_ring *ring;
 	int ret;
 
-	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
-		ring_id, workload);
-
 	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
-	/* pin shadow context by gvt even the shadow context will be pinned
-	 * when i915 alloc request. That is because gvt will update the guest
-	 * context from shadow context when workload is completed, and at that
-	 * moment, i915 may already unpined the shadow context to make the
-	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
-	 * the guest context, gvt can unpin the shadow_ctx safely.
-	 */
-	ring = engine->context_pin(engine, shadow_ctx);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		gvt_vgpu_err("fail to pin shadow context\n");
-		workload->status = ret;
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		return ret;
-	}
-
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
@@ -231,7 +216,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	workload->req = i915_gem_request_get(rq);
 
-	ret = intel_gvt_scan_and_shadow_workload(workload);
+	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
 		goto out;
 
@@ -243,6 +228,27 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	}
 
 	ret = populate_shadow_context(workload);
+
+out:
+	return ret;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_ring *ring;
+	int ret = 0;
+
+	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+		ring_id, workload);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
@@ -252,19 +258,30 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		goto out;
 	}
 
-	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
-		ring_id, workload->req);
+	/* pin shadow context by gvt even though the shadow context will be
+	 * pinned when i915 allocates the request. That is because gvt will
+	 * update the guest context from the shadow context when the workload
+	 * completes; at that moment i915 may have already unpinned the shadow
+	 * context, making the shadow_ctx pages invalid. So gvt must pin it
+	 * itself, and unpin it safely after updating the guest context.
+	 */
+	ring = engine->context_pin(engine, shadow_ctx);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
+		gvt_vgpu_err("fail to pin shadow context\n");
+		goto out;
+	}
 
-	ret = 0;
-	workload->dispatched = true;
 out:
 	if (ret)
 		workload->status = ret;
 
-	if (!IS_ERR_OR_NULL(rq))
-		i915_add_request(rq);
-	else
-		engine->context_unpin(engine, shadow_ctx);
+	if (!IS_ERR_OR_NULL(workload->req)) {
+		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+				ring_id, workload->req);
+		i915_add_request(workload->req);
+		workload->dispatched = true;
+	}
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
-- 
2.20.1