drm/i915/gvt: remove workload from intel_shadow_wa_ctx structure
author Tina Zhang <tina.zhang@intel.com>
Fri, 17 Mar 2017 07:08:51 +0000 (03:08 -0400)
committer Zhenyu Wang <zhenyuw@linux.intel.com>
Wed, 29 Mar 2017 07:29:21 +0000 (15:29 +0800)
intel_shadow_wa_ctx is embedded in intel_vgpu_workload, so container_of() can
be used to derive the containing intel_vgpu_workload from a wa_ctx pointer,
which better expresses the relationship between the two structures. This patch
removes the now-redundant workload back-pointer from intel_shadow_wa_ctx and
the extra dereferences through it.
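For reference, a minimal sketch of the container_of() pattern the patch relies
on, with struct bodies elided and a hypothetical helper name
workload_from_wa_ctx (the actual change open-codes the call at each use site):

    #include <linux/kernel.h>	/* container_of() */

    static inline struct intel_vgpu_workload *
    workload_from_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
    {
            /*
             * container_of() subtracts
             * offsetof(struct intel_vgpu_workload, wa_ctx) from the
             * member's address to recover the enclosing workload, so
             * no back-pointer needs to be stored in the member.
             */
            return container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
    }

This is valid only because wa_ctx is an embedded member of
intel_vgpu_workload, not a separately allocated object.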

v2: add "drm/i915/gvt" prefix. (Zhenyu)

Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/scheduler.h

index c239fa8cbd0c0ecd540700770ef06887199b1706..94f2e701e4d44dd0c9fe68677f953a17e57f93e5 100644 (file)
@@ -2609,6 +2609,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
        struct parser_exec_state s;
        int ret = 0;
+       struct intel_vgpu_workload *workload = container_of(wa_ctx,
+                               struct intel_vgpu_workload,
+                               wa_ctx);
 
        /* ring base is page aligned */
        if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2623,14 +2626,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
        s.buf_type = RING_BUFFER_INSTRUCTION;
        s.buf_addr_type = GTT_BUFFER;
-       s.vgpu = wa_ctx->workload->vgpu;
-       s.ring_id = wa_ctx->workload->ring_id;
+       s.vgpu = workload->vgpu;
+       s.ring_id = workload->ring_id;
        s.ring_start = wa_ctx->indirect_ctx.guest_gma;
        s.ring_size = ring_size;
        s.ring_head = gma_head;
        s.ring_tail = gma_tail;
        s.rb_va = wa_ctx->indirect_ctx.shadow_va;
-       s.workload = wa_ctx->workload;
+       s.workload = workload;
 
        ret = ip_gma_set(&s, gma_head);
        if (ret)
@@ -2713,12 +2716,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        int ctx_size = wa_ctx->indirect_ctx.size;
        unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
-       struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+       struct intel_vgpu_workload *workload = container_of(wa_ctx,
+                                       struct intel_vgpu_workload,
+                                       wa_ctx);
+       struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_gem_object *obj;
        int ret = 0;
        void *map;
 
-       obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+       obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
                                     roundup(ctx_size + CACHELINE_BYTES,
                                             PAGE_SIZE));
        if (IS_ERR(obj))
@@ -2738,8 +2744,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
                goto unmap_src;
        }
 
-       ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
-                               wa_ctx->workload->vgpu->gtt.ggtt_mm,
+       ret = copy_gma_to_hva(workload->vgpu,
+                               workload->vgpu->gtt.ggtt_mm,
                                guest_gma, guest_gma + ctx_size,
                                map);
        if (ret < 0) {
@@ -2777,7 +2783,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        int ret;
-       struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+       struct intel_vgpu_workload *workload = container_of(wa_ctx,
+                                       struct intel_vgpu_workload,
+                                       wa_ctx);
+       struct intel_vgpu *vgpu = workload->vgpu;
 
        if (wa_ctx->indirect_ctx.size == 0)
                return 0;
index f1f426a97aa9d43826010d7be90f6bffdbe59426..ce4276a7cf9c4eeb38c587928137cda66125b6b7 100644 (file)
@@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-       int ring_id = wa_ctx->workload->ring_id;
-       struct i915_gem_context *shadow_ctx =
-               wa_ctx->workload->vgpu->shadow_ctx;
+       struct intel_vgpu_workload *workload = container_of(wa_ctx,
+                                       struct intel_vgpu_workload,
+                                       wa_ctx);
+       int ring_id = workload->ring_id;
+       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
@@ -680,7 +682,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
                        CACHELINE_BYTES;
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
-               workload->wa_ctx.workload = workload;
 
                WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
        }
index 2833dfa8c9aed8e9b6c8f86bbd2ad9a45c461b0c..2cd725c0573e75924b3afc49603b7d9b6ba0df16 100644 (file)
@@ -67,7 +67,6 @@ struct shadow_per_ctx {
 };
 
 struct intel_shadow_wa_ctx {
-       struct intel_vgpu_workload *workload;
        struct shadow_indirect_ctx indirect_ctx;
        struct shadow_per_ctx per_ctx;