drm/i915: Rename intel_engine_cs function parameters
author    Tvrtko Ursulin <tvrtko.ursulin@intel.com>
          Wed, 16 Mar 2016 11:00:37 +0000 (11:00 +0000)
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>
          Wed, 16 Mar 2016 15:33:10 +0000 (15:33 +0000)
@@
identifier func;
@@
func(..., struct intel_engine_cs *
- ring
+ engine
, ...)
{
<...
- ring
+ engine
...>
}
@@
identifier func;
type T;
@@
T func(..., struct intel_engine_cs *
- ring
+ engine
, ...);

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
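
The commit message is the Coccinelle semantic patch that performed the rename: the first rule matches function definitions that take a struct intel_engine_cs *ring parameter and renames the parameter together with every use of it inside the body (the <... ...> nest matches the identifier anywhere in the function), while the second rule, with the type metavariable T, does the same for prototypes of any return type. A minimal before/after sketch of what the first rule rewrites (the function below is illustrative, not taken from the i915 sources):

    /* Before: the parameter and the body both use "ring". */
    static bool example_busy(struct intel_engine_cs *ring)
    {
            return !list_empty(&ring->request_list);
    }

    /* After the semantic patch: only the identifier is renamed;
     * nothing else in the function changes. */
    static bool example_busy(struct intel_engine_cs *engine)
    {
            return !list_empty(&engine->request_list);
    }

A script like this is normally applied with Coccinelle's spatch tool, e.g. spatch --sp-file rename.cocci --dir drivers/gpu/drm/i915 --in-place; the script file name and the exact invocation used for this commit are assumptions, since the commit does not record them.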
15 files changed:
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_render_state.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894ed9257848d7e68fb6e3f8e0394e6bbd9a..2c50142be55973e70ca7bb3cd52e7b5b9c7fa5d6 100644
@@ -555,7 +555,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
        return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
 {
@@ -577,7 +577,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 
                        if (curr < previous) {
                                DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-                                         ring->id, i, j, curr, previous);
+                                         engine->id, i, j, curr, previous);
                                ret = false;
                        }
 
@@ -611,11 +611,11 @@ static bool check_sorted(int ring_id,
        return ret;
 }
 
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
 {
-       return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
-               check_sorted(ring->id, ring->master_reg_table,
-                            ring->master_reg_count);
+       return check_sorted(engine->id, engine->reg_table, engine->reg_count) &&
+               check_sorted(engine->id, engine->master_reg_table,
+                            engine->master_reg_count);
 }
 
 struct cmd_node {
@@ -639,13 +639,13 @@ struct cmd_node {
  */
 #define CMD_HASH_MASK STD_MI_OPCODE_MASK
 
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
                           const struct drm_i915_cmd_table *cmd_tables,
                           int cmd_table_count)
 {
        int i, j;
 
-       hash_init(ring->cmd_hash);
+       hash_init(engine->cmd_hash);
 
        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +660,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
                                return -ENOMEM;
 
                        desc_node->desc = desc;
-                       hash_add(ring->cmd_hash, &desc_node->node,
+                       hash_add(engine->cmd_hash, &desc_node->node,
                                 desc->cmd.value & CMD_HASH_MASK);
                }
        }
@@ -668,13 +668,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
        return 0;
 }
 
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
 {
        struct hlist_node *tmp;
        struct cmd_node *desc_node;
        int i;
 
-       hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+       hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
                hash_del(&desc_node->node);
                kfree(desc_node);
        }
@@ -690,18 +690,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
  *
  * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 {
        const struct drm_i915_cmd_table *cmd_tables;
        int cmd_table_count;
        int ret;
 
-       if (!IS_GEN7(ring->dev))
+       if (!IS_GEN7(engine->dev))
                return 0;
 
-       switch (ring->id) {
+       switch (engine->id) {
        case RCS:
-               if (IS_HASWELL(ring->dev)) {
+               if (IS_HASWELL(engine->dev)) {
                        cmd_tables = hsw_render_ring_cmds;
                        cmd_table_count =
                                ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +710,26 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
                        cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
                }
 
-               ring->reg_table = gen7_render_regs;
-               ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+               engine->reg_table = gen7_render_regs;
+               engine->reg_count = ARRAY_SIZE(gen7_render_regs);
 
-               if (IS_HASWELL(ring->dev)) {
-                       ring->master_reg_table = hsw_master_regs;
-                       ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+               if (IS_HASWELL(engine->dev)) {
+                       engine->master_reg_table = hsw_master_regs;
+                       engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
-                       ring->master_reg_table = ivb_master_regs;
-                       ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+                       engine->master_reg_table = ivb_master_regs;
+                       engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }
 
-               ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+               engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
                break;
        case VCS:
                cmd_tables = gen7_video_cmds;
                cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
-               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case BCS:
-               if (IS_HASWELL(ring->dev)) {
+               if (IS_HASWELL(engine->dev)) {
                        cmd_tables = hsw_blt_ring_cmds;
                        cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
                } else {
@@ -737,44 +737,44 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
                        cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
                }
 
-               ring->reg_table = gen7_blt_regs;
-               ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+               engine->reg_table = gen7_blt_regs;
+               engine->reg_count = ARRAY_SIZE(gen7_blt_regs);
 
-               if (IS_HASWELL(ring->dev)) {
-                       ring->master_reg_table = hsw_master_regs;
-                       ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+               if (IS_HASWELL(engine->dev)) {
+                       engine->master_reg_table = hsw_master_regs;
+                       engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
-                       ring->master_reg_table = ivb_master_regs;
-                       ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+                       engine->master_reg_table = ivb_master_regs;
+                       engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }
 
-               ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+               engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
                break;
        case VECS:
                cmd_tables = hsw_vebox_cmds;
                cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
                /* VECS can use the same length_mask function as VCS */
-               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        default:
                DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-                         ring->id);
+                         engine->id);
                BUG();
        }
 
-       BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
-       BUG_ON(!validate_regs_sorted(ring));
+       BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+       BUG_ON(!validate_regs_sorted(engine));
 
-       WARN_ON(!hash_empty(ring->cmd_hash));
+       WARN_ON(!hash_empty(engine->cmd_hash));
 
-       ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+       ret = init_hash_table(engine, cmd_tables, cmd_table_count);
        if (ret) {
                DRM_ERROR("CMD: cmd_parser_init failed!\n");
-               fini_hash_table(ring);
+               fini_hash_table(engine);
                return ret;
        }
 
-       ring->needs_cmd_parser = true;
+       engine->needs_cmd_parser = true;
 
        return 0;
 }
@@ -786,21 +786,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
  * Releases any resources related to command parsing that may have been
  * initialized for the specified ring.
  */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
 {
-       if (!ring->needs_cmd_parser)
+       if (!engine->needs_cmd_parser)
                return;
 
-       fini_hash_table(ring);
+       fini_hash_table(engine);
 }
 
 static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
                  u32 cmd_header)
 {
        struct cmd_node *desc_node;
 
-       hash_for_each_possible(ring->cmd_hash, desc_node, node,
+       hash_for_each_possible(engine->cmd_hash, desc_node, node,
                               cmd_header & CMD_HASH_MASK) {
                const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
                u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +822,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
  * ring's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
         u32 cmd_header,
         struct drm_i915_cmd_descriptor *default_desc)
 {
        const struct drm_i915_cmd_descriptor *desc;
        u32 mask;
 
-       desc = find_cmd_in_table(ring, cmd_header);
+       desc = find_cmd_in_table(engine, cmd_header);
        if (desc)
                return desc;
 
-       mask = ring->get_cmd_length_mask(cmd_header);
+       mask = engine->get_cmd_length_mask(cmd_header);
        if (!mask)
                return NULL;
 
@@ -963,18 +963,18 @@ unpin_src:
  *
  * Return: true if the ring requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 {
-       if (!ring->needs_cmd_parser)
+       if (!engine->needs_cmd_parser)
                return false;
 
-       if (!USES_PPGTT(ring->dev))
+       if (!USES_PPGTT(engine->dev))
                return false;
 
        return (i915.enable_cmd_parser == 1);
 }
 
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
                      const struct drm_i915_cmd_descriptor *desc,
                      const u32 *cmd, u32 length,
                      const bool is_master,
@@ -1004,17 +1004,17 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                     offset += step) {
                        const u32 reg_addr = cmd[offset] & desc->reg.mask;
                        const struct drm_i915_reg_descriptor *reg =
-                               find_reg(ring->reg_table, ring->reg_count,
+                               find_reg(engine->reg_table, engine->reg_count,
                                         reg_addr);
 
                        if (!reg && is_master)
-                               reg = find_reg(ring->master_reg_table,
-                                              ring->master_reg_count,
+                               reg = find_reg(engine->master_reg_table,
+                                              engine->master_reg_count,
                                               reg_addr);
 
                        if (!reg) {
                                DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-                                                reg_addr, *cmd, ring->id);
+                                                reg_addr, *cmd, engine->id);
                                return false;
                        }
 
@@ -1087,7 +1087,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                                                 *cmd,
                                                 desc->bits[i].mask,
                                                 desc->bits[i].expected,
-                                                dword, ring->id);
+                                                dword, engine->id);
                                return false;
                        }
                }
@@ -1113,7 +1113,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
                    struct drm_i915_gem_object *batch_obj,
                    struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
@@ -1147,7 +1147,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
                if (*cmd == MI_BATCH_BUFFER_END)
                        break;
 
-               desc = find_cmd(ring, *cmd, &default_desc);
+               desc = find_cmd(engine, *cmd, &default_desc);
                if (!desc) {
                        DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
                                         *cmd);
@@ -1179,7 +1179,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
                        break;
                }
 
-               if (!check_cmd(ring, desc, cmd, length, is_master,
+               if (!check_cmd(engine, desc, cmd, length, is_master,
                               &oacontrol_set)) {
                        ret = -EINVAL;
                        break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5037ccb18e778c3e0d6e2893c3c8d84a6743a713..164e1432d41fe97b9e0bab2e8d2b04997862da09 100644
@@ -725,11 +725,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-                                struct intel_engine_cs *ring)
+                                struct intel_engine_cs *engine)
 {
-       if (ring->get_seqno) {
+       if (engine->get_seqno) {
                seq_printf(m, "Current sequence (%s): %x\n",
-                          ring->name, ring->get_seqno(ring, false));
+                          engine->name, engine->get_seqno(engine, false));
        }
 }
 
@@ -1992,22 +1992,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static void i915_dump_lrc_obj(struct seq_file *m,
                              struct intel_context *ctx,
-                             struct intel_engine_cs *ring)
+                             struct intel_engine_cs *engine)
 {
        struct page *page;
        uint32_t *reg_state;
        int j;
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        unsigned long ggtt_offset = 0;
 
        if (ctx_obj == NULL) {
                seq_printf(m, "Context on %s with no gem object\n",
-                          ring->name);
+                          engine->name);
                return;
        }
 
-       seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-                  intel_execlists_ctx_id(ctx, ring));
+       seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+                  intel_execlists_ctx_id(ctx, engine));
 
        if (!i915_gem_obj_ggtt_bound(ctx_obj))
                seq_puts(m, "\tNot bound in GGTT\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 80b14f1ba302053004f2a72e63393bdf9dba363b..8d87242ce601cb0e4b53768076317e320c84685d 100644
@@ -2964,10 +2964,10 @@ int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
 
@@ -3297,10 +3297,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
                    struct drm_i915_gem_object *batch_obj,
                    struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
@@ -3571,11 +3571,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
        }
 }
 
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
                                      struct drm_i915_gem_request *req)
 {
-       if (ring->trace_irq_req == NULL && ring->irq_get(ring))
-               i915_gem_request_assign(&ring->trace_irq_req, req);
+       if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+               i915_gem_request_assign(&engine->trace_irq_req, req);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5a7f6032f0664aaf993d80a63eee80f336aa7b1d..1119b8f46f09b09855729ce86cdef4c424d14852 100644
@@ -1141,9 +1141,9 @@ static void fake_irq(unsigned long data)
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_engine_cs *ring)
+                      struct intel_engine_cs *engine)
 {
-       return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+       return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
 static unsigned long local_clock_us(unsigned *cpu)
@@ -2689,11 +2689,11 @@ void i915_gem_request_free(struct kref *req_ref)
 }
 
 static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
                         struct intel_context *ctx,
                         struct drm_i915_gem_request **req_out)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine->dev);
        struct drm_i915_gem_request *req;
        int ret;
 
@@ -2706,13 +2706,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
        if (req == NULL)
                return -ENOMEM;
 
-       ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+       ret = i915_gem_get_seqno(engine->dev, &req->seqno);
        if (ret)
                goto err;
 
        kref_init(&req->ref);
        req->i915 = dev_priv;
-       req->ring = ring;
+       req->ring = engine;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
 
@@ -2787,11 +2787,11 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 }
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
 
-       list_for_each_entry(request, &ring->request_list, list) {
+       list_for_each_entry(request, &engine->request_list, list) {
                if (i915_gem_request_completed(request, false))
                        continue;
 
@@ -2802,37 +2802,37 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
 }
 
 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                                      struct intel_engine_cs *ring)
+                                      struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
        bool ring_hung;
 
-       request = i915_gem_find_active_request(ring);
+       request = i915_gem_find_active_request(engine);
 
        if (request == NULL)
                return;
 
-       ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+       ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
        i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-       list_for_each_entry_continue(request, &ring->request_list, list)
+       list_for_each_entry_continue(request, &engine->request_list, list)
                i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *ring)
+                                       struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *buffer;
 
-       while (!list_empty(&ring->active_list)) {
+       while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
 
-               obj = list_first_entry(&ring->active_list,
+               obj = list_first_entry(&engine->active_list,
                                       struct drm_i915_gem_object,
-                                      ring_list[ring->id]);
+                                      ring_list[engine->id]);
 
-               i915_gem_object_retire__read(obj, ring->id);
+               i915_gem_object_retire__read(obj, engine->id);
        }
 
        /*
@@ -2842,14 +2842,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         */
 
        if (i915.enable_execlists) {
-               spin_lock_irq(&ring->execlist_lock);
+               spin_lock_irq(&engine->execlist_lock);
 
                /* list_splice_tail_init checks for empty lists */
-               list_splice_tail_init(&ring->execlist_queue,
-                                     &ring->execlist_retired_req_list);
+               list_splice_tail_init(&engine->execlist_queue,
+                                     &engine->execlist_retired_req_list);
 
-               spin_unlock_irq(&ring->execlist_lock);
-               intel_execlists_retire_requests(ring);
+               spin_unlock_irq(&engine->execlist_lock);
+               intel_execlists_retire_requests(engine);
        }
 
        /*
@@ -2859,10 +2859,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         * implicit references on things like e.g. ppgtt address spaces through
         * the request.
         */
-       while (!list_empty(&ring->request_list)) {
+       while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
 
-               request = list_first_entry(&ring->request_list,
+               request = list_first_entry(&engine->request_list,
                                           struct drm_i915_gem_request,
                                           list);
 
@@ -2876,7 +2876,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         * upon reset is less than when we start. Do one more pass over
         * all the ringbuffers to reset last_retired_head.
         */
-       list_for_each_entry(buffer, &ring->buffers, link) {
+       list_for_each_entry(buffer, &engine->buffers, link) {
                buffer->last_retired_head = buffer->tail;
                intel_ring_update_space(buffer);
        }
@@ -2910,19 +2910,19 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-       WARN_ON(i915_verify_lists(ring->dev));
+       WARN_ON(i915_verify_lists(engine->dev));
 
        /* Retire requests first as we use it above for the early return.
         * If we retire requests last, we may use a later seqno and so clear
         * the requests lists without clearing the active list, leading to
         * confusion.
         */
-       while (!list_empty(&ring->request_list)) {
+       while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
 
-               request = list_first_entry(&ring->request_list,
+               request = list_first_entry(&engine->request_list,
                                           struct drm_i915_gem_request,
                                           list);
 
@@ -2936,26 +2936,26 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
         * by the ringbuffer to the flushing/inactive lists as appropriate,
         * before we free the context associated with the requests.
         */
-       while (!list_empty(&ring->active_list)) {
+       while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
 
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list[ring->id]);
+               obj = list_first_entry(&engine->active_list,
+                                      struct drm_i915_gem_object,
+                                      ring_list[engine->id]);
 
-               if (!list_empty(&obj->last_read_req[ring->id]->list))
+               if (!list_empty(&obj->last_read_req[engine->id]->list))
                        break;
 
-               i915_gem_object_retire__read(obj, ring->id);
+               i915_gem_object_retire__read(obj, engine->id);
        }
 
-       if (unlikely(ring->trace_irq_req &&
-                    i915_gem_request_completed(ring->trace_irq_req, true))) {
-               ring->irq_put(ring);
-               i915_gem_request_assign(&ring->trace_irq_req, NULL);
+       if (unlikely(engine->trace_irq_req &&
+                    i915_gem_request_completed(engine->trace_irq_req, true))) {
+               engine->irq_put(engine);
+               i915_gem_request_assign(&engine->trace_irq_req, NULL);
        }
 
-       WARN_ON(i915_verify_lists(ring->dev));
+       WARN_ON(i915_verify_lists(engine->dev));
 }
 
 bool
@@ -5022,10 +5022,10 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 }
 
 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_ring_lists(struct intel_engine_cs *engine)
 {
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index cc07666c2d91f6e398a6ed16326119b72ec0d8c5..44f582988094c51787b8bc9db5d4fc4ad49bd9f3 100644
@@ -600,7 +600,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
+static inline bool should_skip_switch(struct intel_engine_cs *engine,
                                      struct intel_context *from,
                                      struct intel_context *to)
 {
@@ -608,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
                return false;
 
        if (to->ppgtt && from == to &&
-           !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+           !(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings))
                return true;
 
        return false;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        if (!to->ppgtt)
                return false;
 
-       if (INTEL_INFO(ring->dev)->gen < 8)
+       if (INTEL_INFO(engine->dev)->gen < 8)
                return true;
 
-       if (ring != &dev_priv->ring[RCS])
+       if (engine != &dev_priv->ring[RCS])
                return true;
 
        return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
-               u32 hw_flags)
+needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
+                  u32 hw_flags)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        if (!to->ppgtt)
                return false;
 
-       if (!IS_GEN8(ring->dev))
+       if (!IS_GEN8(engine->dev))
                return false;
 
-       if (ring != &dev_priv->ring[RCS])
+       if (engine != &dev_priv->ring[RCS])
                return false;
 
        if (hw_flags & MI_RESTORE_INHIBIT)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b73496ea558353cb3b4092ca0014ae33ba78af5d..f94d756828e888ecdaa1db62a98d40e84cb0bc21 100644
@@ -599,7 +599,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-                               struct intel_engine_cs *ring,
+                               struct intel_engine_cs *engine,
                                bool *need_reloc)
 {
        struct drm_i915_gem_object *obj = vma->obj;
@@ -713,7 +713,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
                            struct list_head *vmas,
                            struct intel_context *ctx,
                            bool *need_relocs)
@@ -723,10 +723,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
-       bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+       bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
        int retry;
 
-       i915_gem_retire_requests_ring(ring);
+       i915_gem_retire_requests_ring(engine);
 
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
@@ -788,7 +788,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                        if (eb_vma_misplaced(vma))
                                ret = i915_vma_unbind(vma);
                        else
-                               ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+                               ret = i915_gem_execbuffer_reserve_vma(vma,
+                                                                     engine,
+                                                                     need_relocs);
                        if (ret)
                                goto err;
                }
@@ -798,7 +800,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                        if (drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+                       ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+                                                             need_relocs);
                        if (ret)
                                goto err;
                }
@@ -821,7 +824,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
-                                 struct intel_engine_cs *ring,
+                                 struct intel_engine_cs *engine,
                                  struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec,
                                  struct intel_context *ctx)
@@ -910,7 +913,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+                                         &need_relocs);
        if (ret)
                goto err;
 
@@ -1062,12 +1066,12 @@ validate_exec_list(struct drm_device *dev,
 
 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-                         struct intel_engine_cs *ring, const u32 ctx_id)
+                         struct intel_engine_cs *engine, const u32 ctx_id)
 {
        struct intel_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;
 
-       if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+       if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
                return ERR_PTR(-EINVAL);
 
        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1080,8 +1084,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                return ERR_PTR(-EIO);
        }
 
-       if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-               int ret = intel_lr_context_deferred_alloc(ctx, ring);
+       if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+               int ret = intel_lr_context_deferred_alloc(ctx, engine);
                if (ret) {
                        DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
                        return ERR_PTR(ret);
@@ -1171,7 +1175,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 }
 
 static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
                          struct drm_i915_gem_exec_object2 *shadow_exec_entry,
                          struct eb_vmas *eb,
                          struct drm_i915_gem_object *batch_obj,
@@ -1183,12 +1187,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
        struct i915_vma *vma;
        int ret;
 
-       shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+       shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
                                                   PAGE_ALIGN(batch_len));
        if (IS_ERR(shadow_batch_obj))
                return shadow_batch_obj;
 
-       ret = i915_parse_cmds(ring,
+       ret = i915_parse_cmds(engine,
                              batch_obj,
                              shadow_batch_obj,
                              batch_start_offset,
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index fc7e6d5c625102492a6b4c2343f4c9f73192f496..b21f72ec895cebc3e15ac835a98ea3458d6be4a9 100644
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
        drm_gem_object_unreference(&so->obj->base);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
                                  struct render_state *so)
 {
        int ret;
 
-       if (WARN_ON(ring->id != RCS))
+       if (WARN_ON(engine->id != RCS))
                return -ENOENT;
 
-       ret = render_state_init(so, ring->dev);
+       ret = render_state_init(so, engine->dev);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index e641bb093a903bba18e02cd0cd156e6070a40a62..6aaa3a10a6309e2f79539fe4df2a79714ea97f26 100644
@@ -43,7 +43,7 @@ struct render_state {
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
                                  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d97cadcfccb16a934d971122453aeed9a2e69e4f..029ed4031edf2b2ce478eb53f469d49e0761eeec 100644
@@ -842,7 +842,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                                        struct drm_i915_error_state *error,
-                                       struct intel_engine_cs *ring,
+                                       struct intel_engine_cs *engine,
                                        struct drm_i915_error_ring *ering)
 {
        struct intel_engine_cs *to;
@@ -861,63 +861,64 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                u16 signal_offset;
                u32 *tmp;
 
-               if (ring == to)
+               if (engine == to)
                        continue;
 
-               signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+               signal_offset = (GEN8_SIGNAL_OFFSET(engine, i) & (PAGE_SIZE - 1))
                                / 4;
                tmp = error->semaphore_obj->pages[0];
-               idx = intel_ring_sync_index(ring, to);
+               idx = intel_ring_sync_index(engine, to);
 
                ering->semaphore_mboxes[idx] = tmp[signal_offset];
-               ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+               ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
        }
 }
 
 static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *ring,
+                                       struct intel_engine_cs *engine,
                                        struct drm_i915_error_ring *ering)
 {
-       ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
-       ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
-       ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
-       ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+       ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+       ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+       ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+       ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
        if (HAS_VEBOX(dev_priv->dev)) {
                ering->semaphore_mboxes[2] =
-                       I915_READ(RING_SYNC_2(ring->mmio_base));
-               ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+                       I915_READ(RING_SYNC_2(engine->mmio_base));
+               ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
        }
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
                                   struct drm_i915_error_state *error,
-                                  struct intel_engine_cs *ring,
+                                  struct intel_engine_cs *engine,
                                   struct drm_i915_error_ring *ering)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (INTEL_INFO(dev)->gen >= 6) {
-               ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
-               ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+               ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+               ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
                if (INTEL_INFO(dev)->gen >= 8)
-                       gen8_record_semaphore_state(dev_priv, error, ring, ering);
+                       gen8_record_semaphore_state(dev_priv, error, engine,
+                                                   ering);
                else
-                       gen6_record_semaphore_state(dev_priv, ring, ering);
+                       gen6_record_semaphore_state(dev_priv, engine, ering);
        }
 
        if (INTEL_INFO(dev)->gen >= 4) {
-               ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
-               ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
-               ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-               ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
-               ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
-               ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+               ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+               ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+               ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+               ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+               ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+               ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
                if (INTEL_INFO(dev)->gen >= 8) {
-                       ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
-                       ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+                       ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+                       ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
                }
-               ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+               ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
        } else {
                ering->faddr = I915_READ(DMA_FADD_I8XX);
                ering->ipeir = I915_READ(IPEIR);
@@ -925,20 +926,20 @@ static void i915_record_ring_state(struct drm_device *dev,
                ering->instdone = I915_READ(GEN2_INSTDONE);
        }
 
-       ering->waiting = waitqueue_active(&ring->irq_queue);
-       ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
-       ering->seqno = ring->get_seqno(ring, false);
-       ering->acthd = intel_ring_get_active_head(ring);
-       ering->start = I915_READ_START(ring);
-       ering->head = I915_READ_HEAD(ring);
-       ering->tail = I915_READ_TAIL(ring);
-       ering->ctl = I915_READ_CTL(ring);
+       ering->waiting = waitqueue_active(&engine->irq_queue);
+       ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+       ering->seqno = engine->get_seqno(engine, false);
+       ering->acthd = intel_ring_get_active_head(engine);
+       ering->start = I915_READ_START(engine);
+       ering->head = I915_READ_HEAD(engine);
+       ering->tail = I915_READ_TAIL(engine);
+       ering->ctl = I915_READ_CTL(engine);
 
        if (I915_NEED_GFX_HWS(dev)) {
                i915_reg_t mmio;
 
                if (IS_GEN7(dev)) {
-                       switch (ring->id) {
+                       switch (engine->id) {
                        default:
                        case RCS:
                                mmio = RENDER_HWS_PGA_GEN7;
@@ -953,51 +954,51 @@ static void i915_record_ring_state(struct drm_device *dev,
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
-               } else if (IS_GEN6(ring->dev)) {
-                       mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+               } else if (IS_GEN6(engine->dev)) {
+                       mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
-                       mmio = RING_HWS_PGA(ring->mmio_base);
+                       mmio = RING_HWS_PGA(engine->mmio_base);
                }
 
                ering->hws = I915_READ(mmio);
        }
 
-       ering->hangcheck_score = ring->hangcheck.score;
-       ering->hangcheck_action = ring->hangcheck.action;
+       ering->hangcheck_score = engine->hangcheck.score;
+       ering->hangcheck_action = engine->hangcheck.action;
 
        if (USES_PPGTT(dev)) {
                int i;
 
-               ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+               ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
                if (IS_GEN6(dev))
                        ering->vm_info.pp_dir_base =
-                               I915_READ(RING_PP_DIR_BASE_READ(ring));
+                               I915_READ(RING_PP_DIR_BASE_READ(engine));
                else if (IS_GEN7(dev))
                        ering->vm_info.pp_dir_base =
-                               I915_READ(RING_PP_DIR_BASE(ring));
+                               I915_READ(RING_PP_DIR_BASE(engine));
                else if (INTEL_INFO(dev)->gen >= 8)
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
-                                       I915_READ(GEN8_RING_PDP_UDW(ring, i));
+                                       I915_READ(GEN8_RING_PDP_UDW(engine, i));
                                ering->vm_info.pdp[i] <<= 32;
                                ering->vm_info.pdp[i] |=
-                                       I915_READ(GEN8_RING_PDP_LDW(ring, i));
+                                       I915_READ(GEN8_RING_PDP_LDW(engine, i));
                        }
        }
 }
 
 
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
                                           struct drm_i915_error_state *error,
                                           struct drm_i915_error_ring *ering)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct drm_i915_gem_object *obj;
 
        /* Currently render ring is the only HW context user */
-       if (ring->id != RCS || !error->ccid)
+       if (engine->id != RCS || !error->ccid)
                return;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f172de0a61bfb8abc3738484967d60c6cd55b482..64658961a7e59039588c1dac14c50c66826fe158 100644
@@ -994,14 +994,14 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
        return;
 }
 
-static void notify_ring(struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *engine)
 {
-       if (!intel_ring_initialized(ring))
+       if (!intel_ring_initialized(engine))
                return;
 
-       trace_i915_gem_request_notify(ring);
+       trace_i915_gem_request_notify(engine);
 
-       wake_up_all(&ring->irq_queue);
+       wake_up_all(&engine->irq_queue);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1319,12 +1319,12 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 }
 
 static __always_inline void
-gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
-               notify_ring(ring);
+               notify_ring(engine);
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-               intel_lrc_irq_handler(ring);
+               intel_lrc_irq_handler(engine);
 }
 
 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
@@ -2805,10 +2805,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 }
 
 static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-       return (list_empty(&ring->request_list) ||
-               i915_seqno_passed(seqno, ring->last_submitted_seqno));
+       return (list_empty(&engine->request_list) ||
+               i915_seqno_passed(seqno, engine->last_submitted_seqno));
 }
 
 static bool
@@ -2824,42 +2824,43 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+                                u64 offset)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct intel_engine_cs *signaller;
        int i;
 
        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
                for_each_ring(signaller, dev_priv, i) {
-                       if (ring == signaller)
+                       if (engine == signaller)
                                continue;
 
-                       if (offset == signaller->semaphore.signal_ggtt[ring->id])
+                       if (offset == signaller->semaphore.signal_ggtt[engine->id])
                                return signaller;
                }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
                for_each_ring(signaller, dev_priv, i) {
-                       if(ring == signaller)
+                       if(engine == signaller)
                                continue;
 
-                       if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+                       if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
                                return signaller;
                }
        }
 
        DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-                 ring->id, ipehr, offset);
+                 engine->id, ipehr, offset);
 
        return NULL;
 }
 
 static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;
@@ -2881,11 +2882,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
         * Therefore, this function does not support execlist mode in its
         * current form. Just return NULL and move on.
         */
-       if (ring->buffer == NULL)
+       if (engine->buffer == NULL)
                return NULL;
 
-       ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-       if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+       ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+       if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
                return NULL;
 
        /*
@@ -2896,8 +2897,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
         * point at at batch, and semaphores are always emitted into the
         * ringbuffer itself.
         */
-       head = I915_READ_HEAD(ring) & HEAD_ADDR;
-       backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+       head = I915_READ_HEAD(engine) & HEAD_ADDR;
+       backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
 
        for (i = backwards; i; --i) {
                /*
@@ -2905,10 +2906,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
                 * our ring is smaller than what the hardware (and hence
                 * HEAD_ADDR) allows. Also handles wrap-around.
                 */
-               head &= ring->buffer->size - 1;
+               head &= engine->buffer->size - 1;
 
                /* This here seems to blow up */
-               cmd = ioread32(ring->buffer->virtual_start + head);
+               cmd = ioread32(engine->buffer->virtual_start + head);
                if (cmd == ipehr)
                        break;
 
@@ -2918,24 +2919,24 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
        if (!i)
                return NULL;
 
-       *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
-       if (INTEL_INFO(ring->dev)->gen >= 8) {
-               offset = ioread32(ring->buffer->virtual_start + head + 12);
+       *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+       if (INTEL_INFO(engine->dev)->gen >= 8) {
+               offset = ioread32(engine->buffer->virtual_start + head + 12);
                offset <<= 32;
-               offset = ioread32(ring->buffer->virtual_start + head + 8);
+               offset = ioread32(engine->buffer->virtual_start + head + 8);
        }
-       return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+       return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
 }
 
-static int semaphore_passed(struct intel_engine_cs *ring)
+static int semaphore_passed(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct intel_engine_cs *signaller;
        u32 seqno;
 
-       ring->hangcheck.deadlock++;
+       engine->hangcheck.deadlock++;
 
-       signaller = semaphore_waits_for(ring, &seqno);
+       signaller = semaphore_waits_for(engine, &seqno);
        if (signaller == NULL)
                return -1;
 
@@ -2963,16 +2964,16 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
                engine->hangcheck.deadlock = 0;
 }
 
-static bool subunits_stuck(struct intel_engine_cs *ring)
+static bool subunits_stuck(struct intel_engine_cs *engine)
 {
        u32 instdone[I915_NUM_INSTDONE_REG];
        bool stuck;
        int i;
 
-       if (ring->id != RCS)
+       if (engine->id != RCS)
                return true;
 
-       i915_get_extra_instdone(ring->dev, instdone);
+       i915_get_extra_instdone(engine->dev, instdone);
 
        /* There might be unstable subunit states even when
         * actual head is not moving. Filter out the unstable ones by
@@ -2981,44 +2982,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
         */
        stuck = true;
        for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
-               const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+               const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
 
-               if (tmp != ring->hangcheck.instdone[i])
+               if (tmp != engine->hangcheck.instdone[i])
                        stuck = false;
 
-               ring->hangcheck.instdone[i] |= tmp;
+               engine->hangcheck.instdone[i] |= tmp;
        }
 
        return stuck;
 }
 
 static enum intel_ring_hangcheck_action
-head_stuck(struct intel_engine_cs *ring, u64 acthd)
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-       if (acthd != ring->hangcheck.acthd) {
+       if (acthd != engine->hangcheck.acthd) {
 
                /* Clear subunit states on head movement */
-               memset(ring->hangcheck.instdone, 0,
-                      sizeof(ring->hangcheck.instdone));
+               memset(engine->hangcheck.instdone, 0,
+                      sizeof(engine->hangcheck.instdone));
 
                return HANGCHECK_ACTIVE;
        }
 
-       if (!subunits_stuck(ring))
+       if (!subunits_stuck(engine))
                return HANGCHECK_ACTIVE;
 
        return HANGCHECK_HUNG;
 }
 
 static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_ring_hangcheck_action ha;
        u32 tmp;
 
-       ha = head_stuck(ring, acthd);
+       ha = head_stuck(engine, acthd);
        if (ha != HANGCHECK_HUNG)
                return ha;
 
@@ -3030,24 +3031,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
         * and break the hang. This should work on
         * all but the second generation chipsets.
         */
-       tmp = I915_READ_CTL(ring);
+       tmp = I915_READ_CTL(engine);
        if (tmp & RING_WAIT) {
                i915_handle_error(dev, false,
                                  "Kicking stuck wait on %s",
-                                 ring->name);
-               I915_WRITE_CTL(ring, tmp);
+                                 engine->name);
+               I915_WRITE_CTL(engine, tmp);
                return HANGCHECK_KICK;
        }
 
        if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-               switch (semaphore_passed(ring)) {
+               switch (semaphore_passed(engine)) {
                default:
                        return HANGCHECK_HUNG;
                case 1:
                        i915_handle_error(dev, false,
                                          "Kicking stuck semaphore on %s",
-                                         ring->name);
-                       I915_WRITE_CTL(ring, tmp);
+                                         engine->name);
+                       I915_WRITE_CTL(engine, tmp);
                        return HANGCHECK_KICK;
                case 0:
                        return HANGCHECK_WAIT;
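
The subunits_stuck() rename above is mechanical, but the logic deserves a second
look while reviewing: each hangcheck sample is OR-ed into an accumulated INSTDONE
mask, and the engine only counts as stuck once a sample contributes no new bits.
A minimal standalone sketch of that accumulation (the array size and sample
values here are invented for illustration, not taken from the driver):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_INSTDONE_REG 4	/* illustrative stand-in for I915_NUM_INSTDONE_REG */

    /* True only when the sample adds no bits the accumulated mask has not
     * already seen, i.e. no subunit made forward progress since last time. */
    static bool sample_is_stuck(const uint32_t sample[NUM_INSTDONE_REG],
                                uint32_t accumulated[NUM_INSTDONE_REG])
    {
        bool stuck = true;

        for (int i = 0; i < NUM_INSTDONE_REG; i++) {
            uint32_t tmp = sample[i] | accumulated[i];

            if (tmp != accumulated[i])
                stuck = false;	/* a new bit appeared: still active */

            accumulated[i] |= tmp;
        }
        return stuck;
    }

    int main(void)
    {
        uint32_t acc[NUM_INSTDONE_REG] = { 0 };
        uint32_t s[NUM_INSTDONE_REG] = { 0x1, 0, 0, 0 };

        printf("%d\n", sample_is_stuck(s, acc));	/* 0: first sighting of bit 0 */
        printf("%d\n", sample_is_stuck(s, acc));	/* 1: nothing new, looks stuck */
        return 0;
    }

head_stuck() clears the accumulated mask whenever ACTHD moves, so this filter
only ever runs while the head is stationary.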
index e95f2b7ed96242a5d201425ac38a7e5e3103df27..317b55b0b596344c4e64e7ab44440357fbde3d37 100644 (file)
@@ -11214,7 +11214,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        return 0;
 }
 
-static bool use_mmio_flip(struct intel_engine_cs *ring,
+static bool use_mmio_flip(struct intel_engine_cs *engine,
                          struct drm_i915_gem_object *obj)
 {
        /*
@@ -11225,10 +11225,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
         * So using MMIO flips there would disrupt this mechanism.
         */
 
-       if (ring == NULL)
+       if (engine == NULL)
                return true;
 
-       if (INTEL_INFO(ring->dev)->gen < 5)
+       if (INTEL_INFO(engine->dev)->gen < 5)
                return false;
 
        if (i915.use_mmio_flip < 0)
@@ -11242,7 +11242,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
                                                       false))
                return true;
        else
-               return ring != i915_gem_request_get_ring(obj->last_write_req);
+               return engine != i915_gem_request_get_ring(obj->last_write_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
index 448c68e691943fd4be35b64be8aff94c1ed8cc17..25514e91479a98319e1b0293b4d9308f87d1b8df 100644 (file)
@@ -228,8 +228,8 @@ enum {
 
 static int intel_lr_context_pin(struct intel_context *ctx,
                                struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-               struct drm_i915_gem_object *default_ctx_obj);
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
+                                          struct drm_i915_gem_object *default_ctx_obj);
 
 
 /**
@@ -266,23 +266,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 }
 
 static void
-logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
 
        if (IS_GEN8(dev) || IS_GEN9(dev))
-               ring->idle_lite_restore_wa = ~0;
+               engine->idle_lite_restore_wa = ~0;
 
-       ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
                                        IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-                                       (ring->id == VCS || ring->id == VCS2);
+                                       (engine->id == VCS || engine->id == VCS2);
 
-       ring->ctx_desc_template = GEN8_CTX_VALID;
-       ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+       engine->ctx_desc_template = GEN8_CTX_VALID;
+       engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
                                   GEN8_CTX_ADDRESSING_MODE_SHIFT;
        if (IS_GEN8(dev))
-               ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
-       ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+               engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+       engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers */
@@ -290,8 +290,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
 
        /* WaEnableForceRestoreInCtxtDescForVCS:skl */
        /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-       if (ring->disable_lite_restore_wa)
-               ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+       if (engine->disable_lite_restore_wa)
+               engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
 }
 
 /**
@@ -314,24 +314,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
  */
 static void
 intel_lr_context_descriptor_update(struct intel_context *ctx,
-                                  struct intel_engine_cs *ring)
+                                  struct intel_engine_cs *engine)
 {
        uint64_t lrca, desc;
 
-       lrca = ctx->engine[ring->id].lrc_vma->node.start +
+       lrca = ctx->engine[engine->id].lrc_vma->node.start +
               LRC_PPHWSP_PN * PAGE_SIZE;
 
-       desc = ring->ctx_desc_template;                    /* bits  0-11 */
+       desc = engine->ctx_desc_template;                          /* bits  0-11 */
        desc |= lrca;                                      /* bits 12-31 */
        desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
 
-       ctx->engine[ring->id].lrc_desc = desc;
+       ctx->engine[engine->id].lrc_desc = desc;
 }
 
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring)
+                                    struct intel_engine_cs *engine)
 {
-       return ctx->engine[ring->id].lrc_desc;
+       return ctx->engine[engine->id].lrc_desc;
 }
 
 /**
@@ -351,9 +351,9 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
  * Return: 20-bit globally unique context ID.
  */
 u32 intel_execlists_ctx_id(struct intel_context *ctx,
-                          struct intel_engine_cs *ring)
+                          struct intel_engine_cs *engine)
 {
-       return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
+       return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
 }
 
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
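
For anyone checking the descriptor arithmetic in the hunk above: the template
occupies bits 0-11, the page-aligned LRCA bits 12-31, and the context ID
(the LRCA shifted down by PAGE_SHIFT) bits 32-51, which is also the field
intel_execlists_ctx_id() extracts. A standalone illustration, where the
template and address values are made up for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define GEN8_CTX_ID_SHIFT 32

    int main(void)
    {
        uint64_t template = 0x1;       /* e.g. just a "valid" bit; illustrative */
        uint64_t lrca = 0x00345000;    /* page-aligned GGTT offset; illustrative */
        uint64_t desc;

        desc  = template;                                  /* bits  0-11 */
        desc |= lrca;                                      /* bits 12-31 */
        desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */

        printf("desc   = 0x%016llx\n", (unsigned long long)desc);
        printf("ctx_id = 0x%llx\n",
               (unsigned long long)(desc >> GEN8_CTX_ID_SHIFT));
        return 0;
    }

With these inputs the descriptor comes out as 0x0000034500345001 and the
extracted context ID as 0x345, i.e. the LRCA's page frame number.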
@@ -424,21 +424,21 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
        execlists_elsp_write(rq0, rq1);
 }
 
-static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
+static void execlists_context_unqueue__locked(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
        struct drm_i915_gem_request *cursor, *tmp;
 
-       assert_spin_locked(&ring->execlist_lock);
+       assert_spin_locked(&engine->execlist_lock);
 
        /*
         * If irqs are not active generate a warning as batches that finish
         * without the irqs may get lost and a GPU Hang may occur.
         */
-       WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
+       WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
 
        /* Try to read in pairs */
-       list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+       list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
                                 execlist_link) {
                if (!req0) {
                        req0 = cursor;
@@ -447,7 +447,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
                        list_move_tail(&req0->execlist_link,
-                                      &ring->execlist_retired_req_list);
+                                      &engine->execlist_retired_req_list);
                        req0 = cursor;
                } else {
                        req1 = cursor;
@@ -459,7 +459,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
        if (unlikely(!req0))
                return;
 
-       if (req0->elsp_submitted & ring->idle_lite_restore_wa) {
+       if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
                /*
                 * WaIdleLiteRestore: make sure we never cause a lite restore
                 * with HEAD==TAIL.
@@ -470,7 +470,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
                 */
                struct intel_ringbuffer *ringbuf;
 
-               ringbuf = req0->ctx->engine[ring->id].ringbuf;
+               ringbuf = req0->ctx->engine[engine->id].ringbuf;
                req0->tail += 8;
                req0->tail &= ringbuf->size - 1;
        }
@@ -478,34 +478,34 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
        execlists_submit_requests(req0, req1);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        spin_lock(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 
-       execlists_context_unqueue__locked(ring);
+       execlists_context_unqueue__locked(engine);
 
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock(&dev_priv->uncore.lock);
 }
 
 static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id)
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
 {
        struct drm_i915_gem_request *head_req;
 
-       assert_spin_locked(&ring->execlist_lock);
+       assert_spin_locked(&engine->execlist_lock);
 
-       head_req = list_first_entry_or_null(&ring->execlist_queue,
+       head_req = list_first_entry_or_null(&engine->execlist_queue,
                                            struct drm_i915_gem_request,
                                            execlist_link);
 
        if (!head_req)
                return 0;
 
-       if (unlikely(intel_execlists_ctx_id(head_req->ctx, ring) != request_id))
+       if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
                return 0;
 
        WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
@@ -514,26 +514,26 @@ execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id)
                return 0;
 
        list_move_tail(&head_req->execlist_link,
-                      &ring->execlist_retired_req_list);
+                      &engine->execlist_retired_req_list);
 
        return 1;
 }
 
 static u32
-get_context_status(struct intel_engine_cs *ring, unsigned int read_pointer,
+get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
                   u32 *context_id)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u32 status;
 
        read_pointer %= GEN8_CSB_ENTRIES;
 
-       status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
+       status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
 
        if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                return 0;
 
-       *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(ring,
+       *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
                                                              read_pointer));
 
        return status;
@@ -546,33 +546,34 @@ get_context_status(struct intel_engine_cs *ring, unsigned int read_pointer,
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+void intel_lrc_irq_handler(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u32 status_pointer;
        unsigned int read_pointer, write_pointer;
        u32 status = 0;
        u32 status_id;
        unsigned int submit_contexts = 0;
 
-       spin_lock(&ring->execlist_lock);
+       spin_lock(&engine->execlist_lock);
 
        spin_lock(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 
-       status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(ring));
+       status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
 
-       read_pointer = ring->next_context_status_buffer;
+       read_pointer = engine->next_context_status_buffer;
        write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
        if (read_pointer > write_pointer)
                write_pointer += GEN8_CSB_ENTRIES;
 
        while (read_pointer < write_pointer) {
-               status = get_context_status(ring, ++read_pointer, &status_id);
+               status = get_context_status(engine, ++read_pointer,
+                                           &status_id);
 
                if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) {
                        if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
-                               if (execlists_check_remove_request(ring, status_id))
+                               if (execlists_check_remove_request(engine, status_id))
                                        WARN(1, "Lite Restored request removed from queue\n");
                        } else
                                WARN(1, "Preemption without Lite Restore\n");
@@ -581,27 +582,28 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
                if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE |
                    GEN8_CTX_STATUS_ELEMENT_SWITCH))
                        submit_contexts +=
-                               execlists_check_remove_request(ring, status_id);
+                               execlists_check_remove_request(engine,
+                                                              status_id);
        }
 
        if (submit_contexts) {
-               if (!ring->disable_lite_restore_wa ||
+               if (!engine->disable_lite_restore_wa ||
                    (status & GEN8_CTX_STATUS_ACTIVE_IDLE))
-                       execlists_context_unqueue__locked(ring);
+                       execlists_context_unqueue__locked(engine);
        }
 
-       ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
+       engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
        /* Update the read pointer to the old write pointer. Manual ringbuffer
         * management ftw </sarcasm> */
-       I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(ring),
+       I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
                      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-                                   ring->next_context_status_buffer << 8));
+                                   engine->next_context_status_buffer << 8));
 
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock(&dev_priv->uncore.lock);
 
-       spin_unlock(&ring->execlist_lock);
+       spin_unlock(&engine->execlist_lock);
 
        if (unlikely(submit_contexts > 2))
                DRM_ERROR("More than two context complete events?\n");
@@ -1020,53 +1022,53 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        return 0;
 }
 
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req, *tmp;
        struct list_head retired_list;
 
-       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-       if (list_empty(&ring->execlist_retired_req_list))
+       WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+       if (list_empty(&engine->execlist_retired_req_list))
                return;
 
        INIT_LIST_HEAD(&retired_list);
-       spin_lock_irq(&ring->execlist_lock);
-       list_replace_init(&ring->execlist_retired_req_list, &retired_list);
-       spin_unlock_irq(&ring->execlist_lock);
+       spin_lock_irq(&engine->execlist_lock);
+       list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+       spin_unlock_irq(&engine->execlist_lock);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
                struct intel_context *ctx = req->ctx;
                struct drm_i915_gem_object *ctx_obj =
-                               ctx->engine[ring->id].state;
+                               ctx->engine[engine->id].state;
 
                if (ctx_obj && (ctx != req->i915->kernel_context))
-                       intel_lr_context_unpin(ctx, ring);
+                       intel_lr_context_unpin(ctx, engine);
 
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
 }
 
-void intel_logical_ring_stop(struct intel_engine_cs *ring)
+void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        int ret;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_ring_initialized(engine))
                return;
 
-       ret = intel_ring_idle(ring);
-       if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+       ret = intel_ring_idle(engine);
+       if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         ring->name, ret);
+                         engine->name, ret);
 
        /* TODO: Is this correct with Execlists enabled? */
-       I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-       if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-               DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+       I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+       if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+               DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
                return;
        }
-       I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
@@ -1086,17 +1088,17 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 }
 
 static int intel_lr_context_do_pin(struct intel_context *ctx,
-                                  struct intel_engine_cs *ring)
+                                  struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+       struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
        struct page *lrc_state_page;
        uint32_t *lrc_reg_state;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
 
        ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
                        PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
@@ -1109,15 +1111,15 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
                goto unpin_ctx_obj;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
        if (ret)
                goto unpin_ctx_obj;
 
-       ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
-       intel_lr_context_descriptor_update(ctx, ring);
+       ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+       intel_lr_context_descriptor_update(ctx, engine);
        lrc_reg_state = kmap(lrc_state_page);
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
-       ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
+       ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;
 
        /* Invalidate GuC TLB. */
@@ -1235,7 +1237,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
  * This WA is also required for Gen9 so extracting as a function avoids
  * code duplication.
  */
-static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *const batch,
                                                uint32_t index)
 {
@@ -1247,13 +1249,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
-       if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-       wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
        wa_ctx_emit(batch, index, 0);
 
        wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1271,7 +1273,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
        wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-       wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
        wa_ctx_emit(batch, index, 0);
 
        return index;
@@ -1324,7 +1326,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
  * Return: non-zero if we exceed the PAGE_SIZE limit.
  */
 
-static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t *const batch,
                                    uint32_t *offset)
@@ -1336,8 +1338,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
        wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-       if (IS_BROADWELL(ring->dev)) {
-               int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+       if (IS_BROADWELL(engine->dev)) {
+               int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
                if (rc < 0)
                        return rc;
                index = rc;
@@ -1345,7 +1347,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
 
        /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
        /* Actual scratch location is at 128 bytes offset */
-       scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+       scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
 
        wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
        wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1387,7 +1389,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
  *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
  */
-static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
                               uint32_t *const batch,
                               uint32_t *offset)
@@ -1402,13 +1404,13 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
        return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t *const batch,
                                    uint32_t *offset)
 {
        int ret;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1417,7 +1419,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
-       ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+       ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
        if (ret < 0)
                return ret;
        index = ret;
@@ -1429,12 +1431,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
        return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 }
 
-static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
                               uint32_t *const batch,
                               uint32_t *offset)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1457,60 +1459,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
        return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
        int ret;
 
-       ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
-       if (!ring->wa_ctx.obj) {
+       engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
+                                                  PAGE_ALIGN(size));
+       if (!engine->wa_ctx.obj) {
                DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
                return -ENOMEM;
        }
 
-       ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+       ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
        if (ret) {
                DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
                                 ret);
-               drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+               drm_gem_object_unreference(&engine->wa_ctx.obj->base);
                return ret;
        }
 
        return 0;
 }
 
-static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
 {
-       if (ring->wa_ctx.obj) {
-               i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
-               drm_gem_object_unreference(&ring->wa_ctx.obj->base);
-               ring->wa_ctx.obj = NULL;
+       if (engine->wa_ctx.obj) {
+               i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
+               drm_gem_object_unreference(&engine->wa_ctx.obj->base);
+               engine->wa_ctx.obj = NULL;
        }
 }
 
-static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 {
        int ret;
        uint32_t *batch;
        uint32_t offset;
        struct page *page;
-       struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+       struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
 
-       WARN_ON(ring->id != RCS);
+       WARN_ON(engine->id != RCS);
 
        /* update this when WA for higher Gen are added */
-       if (INTEL_INFO(ring->dev)->gen > 9) {
+       if (INTEL_INFO(engine->dev)->gen > 9) {
                DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-                         INTEL_INFO(ring->dev)->gen);
+                         INTEL_INFO(engine->dev)->gen);
                return 0;
        }
 
        /* some WA perform writes to scratch page, ensure it is valid */
-       if (ring->scratch.obj == NULL) {
-               DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+       if (engine->scratch.obj == NULL) {
+               DRM_ERROR("scratch page not allocated for %s\n", engine->name);
                return -EINVAL;
        }
 
-       ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+       ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
                return ret;
@@ -1520,29 +1523,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
        batch = kmap_atomic(page);
        offset = 0;
 
-       if (INTEL_INFO(ring->dev)->gen == 8) {
-               ret = gen8_init_indirectctx_bb(ring,
+       if (INTEL_INFO(engine->dev)->gen == 8) {
+               ret = gen8_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
                                               &offset);
                if (ret)
                        goto out;
 
-               ret = gen8_init_perctx_bb(ring,
+               ret = gen8_init_perctx_bb(engine,
                                          &wa_ctx->per_ctx,
                                          batch,
                                          &offset);
                if (ret)
                        goto out;
-       } else if (INTEL_INFO(ring->dev)->gen == 9) {
-               ret = gen9_init_indirectctx_bb(ring,
+       } else if (INTEL_INFO(engine->dev)->gen == 9) {
+               ret = gen9_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
                                               &offset);
                if (ret)
                        goto out;
 
-               ret = gen9_init_perctx_bb(ring,
+               ret = gen9_init_perctx_bb(engine,
                                          &wa_ctx->per_ctx,
                                          batch,
                                          &offset);
@@ -1553,27 +1556,28 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
 out:
        kunmap_atomic(batch);
        if (ret)
-               lrc_destroy_wa_ctx_obj(ring);
+               lrc_destroy_wa_ctx_obj(engine);
 
        return ret;
 }
 
-static int gen8_init_common_ring(struct intel_engine_cs *ring)
+static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int next_context_status_buffer_hw;
 
-       lrc_setup_hardware_status_page(ring,
-                               dev_priv->kernel_context->engine[ring->id].state);
+       lrc_setup_hardware_status_page(engine,
+                                      dev_priv->kernel_context->engine[engine->id].state);
 
-       I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-       I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
+       I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
-       I915_WRITE(RING_MODE_GEN7(ring),
+       I915_WRITE(RING_MODE_GEN7(engine),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-       POSTING_READ(RING_MODE_GEN7(ring));
+       POSTING_READ(RING_MODE_GEN7(engine));
 
        /*
         * Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1588,7 +1592,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
         * BXT  |         ?                |         ?            |
         */
        next_context_status_buffer_hw =
-               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
+               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
 
        /*
         * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1598,21 +1602,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
        if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
                next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
-       ring->next_context_status_buffer = next_context_status_buffer_hw;
-       DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+       engine->next_context_status_buffer = next_context_status_buffer_hw;
+       DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-       memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+       memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
 
        return 0;
 }
 
-static int gen8_init_render_ring(struct intel_engine_cs *ring)
+static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = gen8_init_common_ring(ring);
+       ret = gen8_init_common_ring(engine);
        if (ret)
                return ret;
 
@@ -1626,18 +1630,18 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 
        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       return init_workarounds_ring(ring);
+       return init_workarounds_ring(engine);
 }
 
-static int gen9_init_render_ring(struct intel_engine_cs *ring)
+static int gen9_init_render_ring(struct intel_engine_cs *engine)
 {
        int ret;
 
-       ret = gen8_init_common_ring(ring);
+       ret = gen8_init_common_ring(engine);
        if (ret)
                return ret;
 
-       return init_workarounds_ring(ring);
+       return init_workarounds_ring(engine);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1712,9 +1716,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
        return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1722,25 +1726,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-               POSTING_READ(RING_IMR(ring->mmio_base));
+       if (engine->irq_refcount++ == 0) {
+               I915_WRITE_IMR(engine,
+                              ~(engine->irq_enable_mask | engine->irq_keep_mask));
+               POSTING_READ(RING_IMR(engine->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
-               POSTING_READ(RING_IMR(ring->mmio_base));
+       if (--engine->irq_refcount == 0) {
+               I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+               POSTING_READ(RING_IMR(engine->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1848,17 +1853,18 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 {
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
-static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
+                          bool lazy_coherency)
 {
 
        /*
@@ -1873,17 +1879,17 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
         */
 
        if (!lazy_coherency)
-               intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+               intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 
        /* See bxt_a_get_seqno() explaining the reason for the clflush. */
-       intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
 /*
@@ -2002,109 +2008,109 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
- * @ring: Engine Command Streamer.
+ * @engine: Engine Command Streamer.
  *
  */
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_ring_initialized(engine))
                return;
 
-       dev_priv = ring->dev->dev_private;
+       dev_priv = engine->dev->dev_private;
 
-       if (ring->buffer) {
-               intel_logical_ring_stop(ring);
-               WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+       if (engine->buffer) {
+               intel_logical_ring_stop(engine);
+               WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
        }
 
-       if (ring->cleanup)
-               ring->cleanup(ring);
+       if (engine->cleanup)
+               engine->cleanup(engine);
 
-       i915_cmd_parser_fini_ring(ring);
-       i915_gem_batch_pool_fini(&ring->batch_pool);
+       i915_cmd_parser_fini_ring(engine);
+       i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       if (ring->status_page.obj) {
-               kunmap(sg_page(ring->status_page.obj->pages->sgl));
-               ring->status_page.obj = NULL;
+       if (engine->status_page.obj) {
+               kunmap(sg_page(engine->status_page.obj->pages->sgl));
+               engine->status_page.obj = NULL;
        }
 
-       ring->idle_lite_restore_wa = 0;
-       ring->disable_lite_restore_wa = false;
-       ring->ctx_desc_template = 0;
+       engine->idle_lite_restore_wa = 0;
+       engine->disable_lite_restore_wa = false;
+       engine->ctx_desc_template = 0;
 
-       lrc_destroy_wa_ctx_obj(ring);
-       ring->dev = NULL;
+       lrc_destroy_wa_ctx_obj(engine);
+       engine->dev = NULL;
 }
 
 static void
 logical_ring_default_vfuncs(struct drm_device *dev,
-                           struct intel_engine_cs *ring)
+                           struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overridden by each engine. */
-       ring->init_hw = gen8_init_common_ring;
-       ring->emit_request = gen8_emit_request;
-       ring->emit_flush = gen8_emit_flush;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       engine->init_hw = gen8_init_common_ring;
+       engine->emit_request = gen8_emit_request;
+       engine->emit_flush = gen8_emit_flush;
+       engine->irq_get = gen8_logical_ring_get_irq;
+       engine->irq_put = gen8_logical_ring_put_irq;
+       engine->emit_bb_start = gen8_emit_bb_start;
        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ring->get_seqno = bxt_a_get_seqno;
-               ring->set_seqno = bxt_a_set_seqno;
+               engine->get_seqno = bxt_a_get_seqno;
+               engine->set_seqno = bxt_a_set_seqno;
        } else {
-               ring->get_seqno = gen8_get_seqno;
-               ring->set_seqno = gen8_set_seqno;
+               engine->get_seqno = gen8_get_seqno;
+               engine->set_seqno = gen8_set_seqno;
        }
 }
 
 static inline void
-logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
 {
-       ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
-       ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+       engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
 static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 {
        struct intel_context *dctx = to_i915(dev)->kernel_context;
        int ret;
 
        /* Intentionally left blank. */
-       ring->buffer = NULL;
+       engine->buffer = NULL;
 
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       i915_gem_batch_pool_init(dev, &ring->batch_pool);
-       init_waitqueue_head(&ring->irq_queue);
+       engine->dev = dev;
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
+       i915_gem_batch_pool_init(dev, &engine->batch_pool);
+       init_waitqueue_head(&engine->irq_queue);
 
-       INIT_LIST_HEAD(&ring->buffers);
-       INIT_LIST_HEAD(&ring->execlist_queue);
-       INIT_LIST_HEAD(&ring->execlist_retired_req_list);
-       spin_lock_init(&ring->execlist_lock);
+       INIT_LIST_HEAD(&engine->buffers);
+       INIT_LIST_HEAD(&engine->execlist_queue);
+       INIT_LIST_HEAD(&engine->execlist_retired_req_list);
+       spin_lock_init(&engine->execlist_lock);
 
-       logical_ring_init_platform_invariants(ring);
+       logical_ring_init_platform_invariants(engine);
 
-       ret = i915_cmd_parser_init_ring(ring);
+       ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
 
-       ret = intel_lr_context_deferred_alloc(dctx, ring);
+       ret = intel_lr_context_deferred_alloc(dctx, engine);
        if (ret)
                goto error;
 
        /* As this is the default context, always pin it */
-       ret = intel_lr_context_do_pin(dctx, ring);
+       ret = intel_lr_context_do_pin(dctx, engine);
        if (ret) {
                DRM_ERROR(
                        "Failed to pin and map ringbuffer %s: %d\n",
-                       ring->name, ret);
+                       engine->name, ret);
                goto error;
        }
 
        return 0;
 
 error:
-       intel_logical_ring_cleanup(ring);
+       intel_logical_ring_cleanup(engine);
        return ret;
 }
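
logical_ring_default_vfuncs() above captures the driver's initialisation
pattern: install common defaults, then let individual engines override them
(gen8_init_render_ring(), for instance, wraps gen8_init_common_ring()). A
self-contained sketch of that pattern, with invented names:

    #include <stdio.h>

    struct engine {
        const char *name;
        int (*init_hw)(struct engine *e);
    };

    static int default_init_hw(struct engine *e)
    {
        printf("%s: common init\n", e->name);
        return 0;
    }

    static int render_init_hw(struct engine *e)
    {
        default_init_hw(e);		/* reuse the common path... */
        printf("%s: render-specific workarounds\n", e->name);
        return 0;
    }

    static void set_default_vfuncs(struct engine *e)
    {
        e->init_hw = default_init_hw;	/* defaults, overridable per engine */
    }

    int main(void)
    {
        struct engine vcs = { .name = "vcs" };
        struct engine rcs = { .name = "rcs" };

        set_default_vfuncs(&vcs);
        set_default_vfuncs(&rcs);
        rcs.init_hw = render_init_hw;	/* render engine overrides the default */

        vcs.init_hw(&vcs);
        rcs.init_hw(&rcs);
        return 0;
    }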
 
@@ -2329,13 +2335,13 @@ make_rpcs(struct drm_device *dev)
        return rpcs;
 }
 
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
        u32 indirect_ctx_offset;
 
-       switch (INTEL_INFO(ring->dev)->gen) {
+       switch (INTEL_INFO(engine->dev)->gen) {
        default:
-               MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+               MISSING_CASE(INTEL_INFO(engine->dev)->gen);
                /* fall through */
        case 9:
                indirect_ctx_offset =
@@ -2352,9 +2358,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
 
 static int
 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
-                   struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+                   struct intel_engine_cs *engine,
+                   struct intel_ringbuffer *ringbuf)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct page *page;
@@ -2389,33 +2396,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
         * recreate this batchbuffer with new values (including all the missing
         * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
        reg_state[CTX_LRI_HEADER_0] =
-               MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
-       ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+               MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+       ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
+                      RING_CONTEXT_CONTROL(engine),
                       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                          (HAS_RESOURCE_STREAMER(dev) ?
                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
-       ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
+                      0);
        /* Ring buffer start address is not known until the buffer is pinned.
         * It is written to the context image in execlists_update_context()
         */
-       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
+                      RING_START(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
+                      RING_CTL(engine->mmio_base),
                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
-       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
+                      RING_BBADDR_UDW(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
+                      RING_BBADDR(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
+                      RING_BBSTATE(engine->mmio_base),
                       RING_BB_PPGTT);
-       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
-       if (ring->id == RCS) {
-               ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
-               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
-               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
-               if (ring->wa_ctx.obj) {
-                       struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
+                      RING_SBBADDR_UDW(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
+                      RING_SBBADDR(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
+                      RING_SBBSTATE(engine->mmio_base), 0);
+       if (engine->id == RCS) {
+               ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
+                              RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
+               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
+                              RING_INDIRECT_CTX(engine->mmio_base), 0);
+               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
+                              RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
+               if (engine->wa_ctx.obj) {
+                       struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
                        uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
 
                        reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2423,7 +2444,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                                (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
 
                        reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
-                               intel_lr_indirect_ctx_offset(ring) << 6;
+                               intel_lr_indirect_ctx_offset(engine) << 6;
 
                        reg_state[CTX_BB_PER_CTX_PTR+1] =
                                (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2431,16 +2452,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                }
        }
        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
-       ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
+                      RING_CTX_TIMESTAMP(engine->mmio_base), 0);
        /* PDP values will be assigned later if needed */
-       ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
+                      0);
 
        if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
                /* 64b PPGTT (48bit canonical)
@@ -2457,7 +2487,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                execlists_update_context_pdps(ppgtt, reg_state);
        }
 
-       if (ring->id == RCS) {
+       if (engine->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
                               make_rpcs(dev));
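
The ASSIGN_CTX_REG() churn above is easier to review knowing that the macro
simply writes an (mmio offset, value) pair into consecutive context-image
slots following each MI_LOAD_REGISTER_IMM header. A toy version, with a
fabricated header dword and example offsets standing in for the RING_*
registers:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for ASSIGN_CTX_REG(): each register gets two consecutive
     * dwords in the context image, its mmio offset and its initial value. */
    static void assign_ctx_reg(uint32_t *reg_state, int pos,
                               uint32_t mmio_offset, uint32_t val)
    {
        reg_state[pos + 0] = mmio_offset;
        reg_state[pos + 1] = val;
    }

    int main(void)
    {
        uint32_t reg_state[8] = { 0 };

        reg_state[0] = 0xdeadbeef;               /* pretend LRI header for 2 regs */
        assign_ctx_reg(reg_state, 1, 0x2034, 0); /* e.g. RING_HEAD(mmio_base) */
        assign_ctx_reg(reg_state, 3, 0x2030, 0); /* e.g. RING_TAIL(mmio_base) */

        for (int i = 0; i < 5; i++)
            printf("reg_state[%d] = 0x%08x\n", i, reg_state[i]);
        return 0;
    }

The hardware replays these pairs on context restore, which is why the ring
buffer start slot can be left zero here and patched in later once the buffer
is pinned, as the comment in the hunk notes.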
@@ -2513,15 +2543,15 @@ void intel_lr_context_free(struct intel_context *ctx)
  * in LRC mode, but does not include the "shared data page" used with
  * GuC submission. The caller should account for this if using the GuC.
  */
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
        int ret = 0;
 
-       WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
+       WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
 
-       switch (ring->id) {
+       switch (engine->id) {
        case RCS:
-               if (INTEL_INFO(ring->dev)->gen >= 9)
+               if (INTEL_INFO(engine->dev)->gen >= 9)
                        ret = GEN9_LR_CONTEXT_RENDER_SIZE;
                else
                        ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2537,22 +2567,22 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
        return ret;
 }
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-               struct drm_i915_gem_object *default_ctx_obj)
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
+                                          struct drm_i915_gem_object *default_ctx_obj)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct page *page;
 
        /* The HWSP is part of the default context object in LRC mode. */
-       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
+       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
                        + LRC_PPHWSP_PN * PAGE_SIZE;
        page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
-       ring->status_page.page_addr = kmap(page);
-       ring->status_page.obj = default_ctx_obj;
+       engine->status_page.page_addr = kmap(page);
+       engine->status_page.obj = default_ctx_obj;
 
-       I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-                       (u32)ring->status_page.gfx_addr);
-       POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+       I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+                       (u32)engine->status_page.gfx_addr);
+       POSTING_READ(RING_HWS_PGA(engine->mmio_base));
 }
 
 /**
@@ -2570,18 +2600,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  */
 
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                   struct intel_engine_cs *ring)
+                                   struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
        struct intel_ringbuffer *ringbuf;
        int ret;
 
        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-       WARN_ON(ctx->engine[ring->id].state);
+       WARN_ON(ctx->engine[engine->id].state);
 
-       context_size = round_up(intel_lr_context_size(ring), 4096);
+       context_size = round_up(intel_lr_context_size(engine), 4096);
 
        /* One extra page for the shared data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2592,32 +2622,32 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                return -ENOMEM;
        }
 
-       ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+       ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
                ret = PTR_ERR(ringbuf);
                goto error_deref_obj;
        }
 
-       ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+       ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
                goto error_ringbuf;
        }
 
-       ctx->engine[ring->id].ringbuf = ringbuf;
-       ctx->engine[ring->id].state = ctx_obj;
+       ctx->engine[engine->id].ringbuf = ringbuf;
+       ctx->engine[engine->id].state = ctx_obj;
 
-       if (ctx != ctx->i915->kernel_context && ring->init_context) {
+       if (ctx != ctx->i915->kernel_context && engine->init_context) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_request_alloc(ring, ctx);
+               req = i915_gem_request_alloc(engine, ctx);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        DRM_ERROR("ring create req: %d\n", ret);
                        goto error_ringbuf;
                }
 
-               ret = ring->init_context(req);
+               ret = engine->init_context(req);
                if (ret) {
                        DRM_ERROR("ring init context: %d\n",
                                ret);
@@ -2632,8 +2662,8 @@ error_ringbuf:
        intel_ringbuffer_free(ringbuf);
 error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
-       ctx->engine[ring->id].ringbuf = NULL;
-       ctx->engine[ring->id].state = NULL;
+       ctx->engine[engine->id].ringbuf = NULL;
+       ctx->engine[engine->id].state = NULL;
        return ret;
 }
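
[The function above is the second half of a lazy-allocation scheme: per-engine logical ring state is only created the first time a context runs on that engine. A self-contained C sketch of the caller-side shape, using stand-in types and malloc rather than the driver's real structures:]

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct engine  { int id; };
struct context { void *engine_state[4]; };

/* stand-in for intel_lr_context_deferred_alloc(): build state on first use */
static int lr_context_deferred_alloc(struct context *ctx, struct engine *engine)
{
        ctx->engine_state[engine->id] = malloc(64);
        return ctx->engine_state[engine->id] ? 0 : -ENOMEM;
}

static int context_get_state(struct context *ctx, struct engine *engine)
{
        if (!ctx->engine_state[engine->id])
                return lr_context_deferred_alloc(ctx, engine);
        return 0;       /* already allocated: nothing to do */
}

int main(void)
{
        struct engine rcs = { 0 };
        struct context ctx = { { 0 } };

        int first  = context_get_state(&ctx, &rcs);  /* allocates */
        int second = context_get_state(&ctx, &rcs);  /* no-op */

        printf("first=%d second=%d\n", first, second);
        free(ctx.engine_state[rcs.id]);
        return 0;
}
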
 
index e6cda3e225d02b67d2df629b0f06b2462093353f..a17cb12221bad0af70ff1474701d8e99e0001b46 100644
@@ -57,8 +57,8 @@
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
-void intel_logical_ring_stop(struct intel_engine_cs *ring);
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+void intel_logical_ring_stop(struct intel_engine_cs *engine);
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int intel_logical_rings_init(struct drm_device *dev);
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
@@ -98,18 +98,18 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
 #define LRC_STATE_PN   (LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                   struct intel_engine_cs *ring);
+                                   struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct intel_context *ctx,
                            struct intel_engine_cs *engine);
 void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring);
+                                    struct intel_engine_cs *engine);
 
 u32 intel_execlists_ctx_id(struct intel_context *ctx,
-                          struct intel_engine_cs *ring);
+                          struct intel_engine_cs *engine);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -118,7 +118,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas);
 
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+void intel_lrc_irq_handler(struct intel_engine_cs *engine);
+void intel_execlists_retire_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
index 688773aaa5e502e3b9fff6fb809020acb351b7de..53237616ce1987e3ec9d227feb9f98b400bded22 100644
@@ -59,19 +59,19 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
        return ringbuf->space;
 }
 
-bool intel_ring_stopped(struct intel_engine_cs *ring)
+bool intel_ring_stopped(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       return dev_priv->gpu_error.stop_rings & intel_ring_flag(engine);
 }
 
-static void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
-       if (intel_ring_stopped(ring))
+       if (intel_ring_stopped(engine))
                return;
-       ring->write_tail(ring, ringbuf->tail);
+       engine->write_tail(engine, ringbuf->tail);
 }
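
[__intel_ring_advance() can wrap the tail with a simple mask because ring sizes are powers of two. A tiny standalone demonstration of that invariant:]

#include <stdio.h>

int main(void)
{
        const unsigned int size = 4096; /* ring sizes are powers of two */
        unsigned int tail = 4092;

        tail += 8;                      /* emit past the end ... */
        tail &= size - 1;               /* ... and wrap: 4100 & 4095 == 4 */

        printf("tail = %u\n", tail);    /* prints 4 */
        return 0;
}
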
 
 static int
@@ -429,51 +429,51 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *ring,
+static void ring_write_tail(struct intel_engine_cs *engine,
                            u32 value)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       I915_WRITE_TAIL(ring, value);
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       I915_WRITE_TAIL(engine, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u64 acthd;
 
-       if (INTEL_INFO(ring->dev)->gen >= 8)
-               acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
-                                        RING_ACTHD_UDW(ring->mmio_base));
-       else if (INTEL_INFO(ring->dev)->gen >= 4)
-               acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+       if (INTEL_INFO(engine->dev)->gen >= 8)
+               acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+                                        RING_ACTHD_UDW(engine->mmio_base));
+       else if (INTEL_INFO(engine->dev)->gen >= 4)
+               acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);
 
        return acthd;
 }
 
-static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u32 addr;
 
        addr = dev_priv->status_page_dmah->busaddr;
-       if (INTEL_INFO(ring->dev)->gen >= 4)
+       if (INTEL_INFO(engine->dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
 }
 
-static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = engine->dev;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        i915_reg_t mmio;
 
        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
-               switch (ring->id) {
+               switch (engine->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
@@ -492,14 +492,14 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN6(ring->dev)) {
-               mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+       } else if (IS_GEN6(engine->dev)) {
+               mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
-               mmio = RING_HWS_PGA(ring->mmio_base);
+               mmio = RING_HWS_PGA(engine->mmio_base);
        }
 
-       I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+       I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
        POSTING_READ(mmio);
 
        /*
@@ -510,10 +510,10 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
         * invalidating the TLB?
         */
        if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-               i915_reg_t reg = RING_INSTPM(ring->mmio_base);
+               i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
                /* ring should be idle before issuing a sync flush*/
-               WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+               WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -521,117 +521,120 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
                if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
                             1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-                                 ring->name);
+                                 engine->name);
        }
 }
 
-static bool stop_ring(struct intel_engine_cs *ring)
+static bool stop_ring(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine->dev);
 
-       if (!IS_GEN2(ring->dev)) {
-               I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-               if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-                       DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+       if (!IS_GEN2(engine->dev)) {
+               I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+               if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+                       DRM_ERROR("%s : timed out trying to stop ring\n",
+                                 engine->name);
                        /* Sometimes we observe that the idle flag is not
                         * set even though the ring is empty. So double
                         * check before giving up.
                         */
-                       if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+                       if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
                                return false;
                }
        }
 
-       I915_WRITE_CTL(ring, 0);
-       I915_WRITE_HEAD(ring, 0);
-       ring->write_tail(ring, 0);
+       I915_WRITE_CTL(engine, 0);
+       I915_WRITE_HEAD(engine, 0);
+       engine->write_tail(engine, 0);
 
-       if (!IS_GEN2(ring->dev)) {
-               (void)I915_READ_CTL(ring);
-               I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       if (!IS_GEN2(engine->dev)) {
+               (void)I915_READ_CTL(engine);
+               I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }
 
-       return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+       return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
-static int init_ring_common(struct intel_engine_cs *ring)
+static int init_ring_common(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret = 0;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       if (!stop_ring(ring)) {
+       if (!stop_ring(engine)) {
                /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
-                             ring->name,
-                             I915_READ_CTL(ring),
-                             I915_READ_HEAD(ring),
-                             I915_READ_TAIL(ring),
-                             I915_READ_START(ring));
+                             engine->name,
+                             I915_READ_CTL(engine),
+                             I915_READ_HEAD(engine),
+                             I915_READ_TAIL(engine),
+                             I915_READ_START(engine));
 
-               if (!stop_ring(ring)) {
+               if (!stop_ring(engine)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
-                                 ring->name,
-                                 I915_READ_CTL(ring),
-                                 I915_READ_HEAD(ring),
-                                 I915_READ_TAIL(ring),
-                                 I915_READ_START(ring));
+                                 engine->name,
+                                 I915_READ_CTL(engine),
+                                 I915_READ_HEAD(engine),
+                                 I915_READ_TAIL(engine),
+                                 I915_READ_START(engine));
                        ret = -EIO;
                        goto out;
                }
        }
 
        if (I915_NEED_GFX_HWS(dev))
-               intel_ring_setup_status_page(ring);
+               intel_ring_setup_status_page(engine);
        else
-               ring_setup_phys_status_page(ring);
+               ring_setup_phys_status_page(engine);
 
        /* Enforce ordering by reading HEAD register back */
-       I915_READ_HEAD(ring);
+       I915_READ_HEAD(engine);
 
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+       I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
 
        /* WaClearRingBufHeadRegAtInit:ctg,elk */
-       if (I915_READ_HEAD(ring))
+       if (I915_READ_HEAD(engine))
                DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
-                         ring->name, I915_READ_HEAD(ring));
-       I915_WRITE_HEAD(ring, 0);
-       (void)I915_READ_HEAD(ring);
+                         engine->name, I915_READ_HEAD(engine));
+       I915_WRITE_HEAD(engine, 0);
+       (void)I915_READ_HEAD(engine);
 
-       I915_WRITE_CTL(ring,
+       I915_WRITE_CTL(engine,
                        ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                    I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
-                    (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+       if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
+                    I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
+                    (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
-                         ring->name,
-                         I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
-                         I915_READ_HEAD(ring), I915_READ_TAIL(ring),
-                         I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+                         engine->name,
+                         I915_READ_CTL(engine),
+                         I915_READ_CTL(engine) & RING_VALID,
+                         I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+                         I915_READ_START(engine),
+                         (unsigned long)i915_gem_obj_ggtt_offset(obj));
                ret = -EIO;
                goto out;
        }
 
        ringbuf->last_retired_head = -1;
-       ringbuf->head = I915_READ_HEAD(ring);
-       ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+       ringbuf->head = I915_READ_HEAD(engine);
+       ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
        intel_ring_update_space(ringbuf);
 
-       memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+       memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
 
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -640,59 +643,60 @@ out:
 }
 
 void
-intel_fini_pipe_control(struct intel_engine_cs *ring)
+intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
 
-       if (ring->scratch.obj == NULL)
+       if (engine->scratch.obj == NULL)
                return;
 
        if (INTEL_INFO(dev)->gen >= 5) {
-               kunmap(sg_page(ring->scratch.obj->pages->sgl));
-               i915_gem_object_ggtt_unpin(ring->scratch.obj);
+               kunmap(sg_page(engine->scratch.obj->pages->sgl));
+               i915_gem_object_ggtt_unpin(engine->scratch.obj);
        }
 
-       drm_gem_object_unreference(&ring->scratch.obj->base);
-       ring->scratch.obj = NULL;
+       drm_gem_object_unreference(&engine->scratch.obj->base);
+       engine->scratch.obj = NULL;
 }
 
 int
-intel_init_pipe_control(struct intel_engine_cs *ring)
+intel_init_pipe_control(struct intel_engine_cs *engine)
 {
        int ret;
 
-       WARN_ON(ring->scratch.obj);
+       WARN_ON(engine->scratch.obj);
 
-       ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
-       if (ring->scratch.obj == NULL) {
+       engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
+       if (engine->scratch.obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }
 
-       ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+       ret = i915_gem_object_set_cache_level(engine->scratch.obj,
+                                             I915_CACHE_LLC);
        if (ret)
                goto err_unref;
 
-       ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+       ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
        if (ret)
                goto err_unref;
 
-       ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
-       ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
-       if (ring->scratch.cpu_page == NULL) {
+       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
+       engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
+       if (engine->scratch.cpu_page == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }
 
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-                        ring->name, ring->scratch.gtt_offset);
+                        engine->name, engine->scratch.gtt_offset);
        return 0;
 
 err_unpin:
-       i915_gem_object_ggtt_unpin(ring->scratch.obj);
+       i915_gem_object_ggtt_unpin(engine->scratch.obj);
 err_unref:
-       drm_gem_object_unreference(&ring->scratch.obj->base);
+       drm_gem_object_unreference(&engine->scratch.obj->base);
 err:
        return ret;
 }
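
[intel_init_pipe_control() is a textbook example of the kernel's goto-unwind error handling: each failure jumps to a label that releases only what was already set up, in reverse order. A generic, self-contained sketch of the shape, with malloc/free standing in for object allocation, pinning and mapping:]

#include <stdio.h>
#include <stdlib.h>

static int init_two_resources(void **a, void **b)
{
        *a = malloc(16);        /* step 1: allocate (cf. i915_gem_alloc_object) */
        if (!*a)
                goto err;

        *b = malloc(16);        /* step 2: pin/map  (cf. ggtt_pin + kmap) */
        if (!*b)
                goto err_free_a;

        return 0;

err_free_a:
        free(*a);               /* unwind step 1 only */
        *a = NULL;
err:
        return -1;
}

int main(void)
{
        void *a, *b;

        if (init_two_resources(&a, &b) == 0) {
                free(b);
                free(a);
        }
        return 0;
}
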
@@ -789,25 +793,26 @@ static int wa_add(struct drm_i915_private *dev_priv,
 
 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
-static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+                                i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct i915_workarounds *wa = &dev_priv->workarounds;
-       const uint32_t index = wa->hw_whitelist_count[ring->id];
+       const uint32_t index = wa->hw_whitelist_count[engine->id];
 
        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
                return -EINVAL;
 
-       WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+       WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
                 i915_mmio_reg_offset(reg));
-       wa->hw_whitelist_count[ring->id]++;
+       wa->hw_whitelist_count[engine->id]++;
 
        return 0;
 }
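
[wa_ring_whitelist_reg() manages a fixed pool of hardware whitelist slots and refuses new registrations once the per-engine count reaches RING_MAX_NONPRIV_SLOTS. The guard pattern in isolation, as a standalone sketch with an illustrative slot count:]

#include <stdio.h>

#define MAX_NONPRIV_SLOTS 12    /* illustrative; the hardware fixes the real limit */

static unsigned int whitelist_count;

static int whitelist_reg(unsigned int reg_offset)
{
        if (whitelist_count >= MAX_NONPRIV_SLOTS)
                return -1;                      /* every HW slot is in use */

        printf("slot %u <- reg 0x%x\n", whitelist_count, reg_offset);
        whitelist_count++;
        return 0;
}

int main(void)
{
        for (unsigned int i = 0; i < 14; i++)
                if (whitelist_reg(0x2000 + 4 * i) < 0)
                        printf("whitelist full at entry %u\n", i);
        return 0;
}
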
 
-static int gen8_init_workarounds(struct intel_engine_cs *ring)
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -857,13 +862,13 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int bdw_init_workarounds(struct intel_engine_cs *ring)
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
        int ret;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen8_init_workarounds(ring);
+       ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -886,13 +891,13 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int chv_init_workarounds(struct intel_engine_cs *ring)
+static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
        int ret;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen8_init_workarounds(ring);
+       ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -905,9 +910,9 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int gen9_init_workarounds(struct intel_engine_cs *ring)
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
        int ret;
@@ -986,21 +991,21 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
        /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
-       ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+       ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;
 
        /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
-       ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+       ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;
 
        return 0;
 }
 
-static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
@@ -1040,13 +1045,13 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int skl_init_workarounds(struct intel_engine_cs *ring)
+static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
        int ret;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen9_init_workarounds(ring);
+       ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -1113,20 +1118,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
        /* WaDisableLSQCROPERFforOCL:skl */
-       ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
                return ret;
 
-       return skl_tune_iz_hashing(ring);
+       return skl_tune_iz_hashing(engine);
 }
 
-static int bxt_init_workarounds(struct intel_engine_cs *ring)
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
        int ret;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen9_init_workarounds(ring);
+       ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -1157,11 +1162,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+               ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;
 
-               ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+               ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
                if (ret)
                        return ret;
        }
@@ -1169,36 +1174,36 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-int init_workarounds_ring(struct intel_engine_cs *ring)
+int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       WARN_ON(ring->id != RCS);
+       WARN_ON(engine->id != RCS);
 
        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
        if (IS_BROADWELL(dev))
-               return bdw_init_workarounds(ring);
+               return bdw_init_workarounds(engine);
 
        if (IS_CHERRYVIEW(dev))
-               return chv_init_workarounds(ring);
+               return chv_init_workarounds(engine);
 
        if (IS_SKYLAKE(dev))
-               return skl_init_workarounds(ring);
+               return skl_init_workarounds(engine);
 
        if (IS_BROXTON(dev))
-               return bxt_init_workarounds(ring);
+               return bxt_init_workarounds(engine);
 
        return 0;
 }
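
[init_workarounds_ring() is a plain platform dispatcher, and the per-platform routines each apply the per-gen common workarounds first (bdw/chv build on gen8, skl/bxt on gen9, with skl additionally tuning IZ hashing). A condensed standalone sketch of that call structure; the platform names mirror the hunks above, everything else is stand-in:]

#include <stdio.h>

enum platform { BDW, CHV, SKL, BXT, OTHER };

static int gen8_common(void) { puts("gen8 common WAs"); return 0; }
static int gen9_common(void) { puts("gen9 common WAs"); return 0; }

static int bdw_init(void)
{
        int ret = gen8_common();
        /* ... BDW-only workarounds would follow ... */
        return ret;
}

static int skl_init(void)
{
        int ret = gen9_common();
        /* ... SKL-only workarounds and IZ hashing would follow ... */
        return ret;
}

/* Same shape as init_workarounds_ring(): dispatch on platform, with the
 * per-gen common workarounds applied first by each platform routine. */
static int init_workarounds(enum platform p)
{
        switch (p) {
        case BDW: return bdw_init();
        case SKL: return skl_init();
        default:  return 0;     /* nothing to apply */
        }
}

int main(void)
{
        return init_workarounds(SKL);
}
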
 
-static int init_render_ring(struct intel_engine_cs *ring)
+static int init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = init_ring_common(ring);
+       int ret = init_ring_common(engine);
        if (ret)
                return ret;
 
@@ -1241,14 +1246,14 @@ static int init_render_ring(struct intel_engine_cs *ring)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
        if (HAS_L3_DPF(dev))
-               I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
 
-       return init_workarounds_ring(ring);
+       return init_workarounds_ring(engine);
 }
 
-static void render_ring_cleanup(struct intel_engine_cs *ring)
+static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (dev_priv->semaphore_obj) {
@@ -1257,7 +1262,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
                dev_priv->semaphore_obj = NULL;
        }
 
-       intel_fini_pipe_control(ring);
+       intel_fini_pipe_control(engine);
 }
 
 static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
@@ -1554,47 +1559,47 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 {
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page. */
        if (!lazy_coherency) {
-               struct drm_i915_private *dev_priv = ring->dev->dev_private;
-               POSTING_READ(RING_ACTHD(ring->mmio_base));
+               struct drm_i915_private *dev_priv = engine->dev->dev_private;
+               POSTING_READ(RING_ACTHD(engine->mmio_base));
        }
 
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
 static u32
-ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 {
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
 static void
-ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
 static u32
-pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 {
-       return ring->scratch.cpu_page[0];
+       return engine->scratch.cpu_page[0];
 }
 
 static void
-pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       ring->scratch.cpu_page[0] = seqno;
+       engine->scratch.cpu_page[0] = seqno;
 }
 
 static bool
-gen5_ring_get_irq(struct intel_engine_cs *ring)
+gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1602,30 +1607,30 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0)
-               gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+       if (engine->irq_refcount++ == 0)
+               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *ring)
+gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0)
-               gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+       if (--engine->irq_refcount == 0)
+               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-i9xx_ring_get_irq(struct intel_engine_cs *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1633,8 +1638,8 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~ring->irq_enable_mask;
+       if (engine->irq_refcount++ == 0) {
+               dev_priv->irq_mask &= ~engine->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
@@ -1644,15 +1649,15 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               dev_priv->irq_mask |= ring->irq_enable_mask;
+       if (--engine->irq_refcount == 0) {
+               dev_priv->irq_mask |= engine->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
@@ -1660,9 +1665,9 @@ i9xx_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static bool
-i8xx_ring_get_irq(struct intel_engine_cs *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1670,8 +1675,8 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~ring->irq_enable_mask;
+       if (engine->irq_refcount++ == 0) {
+               dev_priv->irq_mask &= ~engine->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
@@ -1681,15 +1686,15 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               dev_priv->irq_mask |= ring->irq_enable_mask;
+       if (--engine->irq_refcount == 0) {
+               dev_priv->irq_mask |= engine->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
@@ -1735,9 +1740,9 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 }
 
 static bool
-gen6_ring_get_irq(struct intel_engine_cs *ring)
+gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1745,14 +1750,14 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS)
-                       I915_WRITE_IMR(ring,
-                                      ~(ring->irq_enable_mask |
+       if (engine->irq_refcount++ == 0) {
+               if (HAS_L3_DPF(dev) && engine->id == RCS)
+                       I915_WRITE_IMR(engine,
+                                      ~(engine->irq_enable_mask |
                                         GT_PARITY_ERROR(dev)));
                else
-                       I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1760,27 +1765,27 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *ring)
+gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS)
-                       I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+       if (--engine->irq_refcount == 0) {
+               if (HAS_L3_DPF(dev) && engine->id == RCS)
+                       I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
                else
-                       I915_WRITE_IMR(ring, ~0);
-               gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+                       I915_WRITE_IMR(engine, ~0);
+               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-hsw_vebox_get_irq(struct intel_engine_cs *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1788,9 +1793,9 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+       if (engine->irq_refcount++ == 0) {
+               I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+               gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1798,24 +1803,24 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               I915_WRITE_IMR(ring, ~0);
-               gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+       if (--engine->irq_refcount == 0) {
+               I915_WRITE_IMR(engine, ~0);
+               gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-gen8_ring_get_irq(struct intel_engine_cs *ring)
+gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1823,15 +1828,15 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS) {
-                       I915_WRITE_IMR(ring,
-                                      ~(ring->irq_enable_mask |
+       if (engine->irq_refcount++ == 0) {
+               if (HAS_L3_DPF(dev) && engine->id == RCS) {
+                       I915_WRITE_IMR(engine,
+                                      ~(engine->irq_enable_mask |
                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
                } else {
-                       I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
                }
-               POSTING_READ(RING_IMR(ring->mmio_base));
+               POSTING_READ(RING_IMR(engine->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1839,21 +1844,21 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *ring)
+gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS) {
-                       I915_WRITE_IMR(ring,
+       if (--engine->irq_refcount == 0) {
+               if (HAS_L3_DPF(dev) && engine->id == RCS) {
+                       I915_WRITE_IMR(engine,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                } else {
-                       I915_WRITE_IMR(ring, ~0);
+                       I915_WRITE_IMR(engine, ~0);
                }
-               POSTING_READ(RING_IMR(ring->mmio_base));
+               POSTING_READ(RING_IMR(engine->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
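
[Every *_get_irq/*_put_irq pair above follows the same refcounting discipline: under dev_priv->irq_lock, only the 0 -> 1 get and the 1 -> 0 put touch the interrupt mask registers. The pattern in isolation, as a single-threaded sketch; the real code holds the spinlock around each transition:]

#include <stdio.h>

static unsigned int irq_refcount;

static void get_irq(void)
{
        if (irq_refcount++ == 0)
                puts("unmask engine interrupt (HW write)");
}

static void put_irq(void)
{
        if (--irq_refcount == 0)
                puts("mask engine interrupt (HW write)");
}

int main(void)
{
        get_irq();      /* first user: enables */
        get_irq();      /* refcount only */
        put_irq();      /* refcount only */
        put_irq();      /* last user: disables */
        return 0;
}
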
@@ -1967,40 +1972,40 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
        return 0;
 }
 
-static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine->dev);
 
        if (!dev_priv->status_page_dmah)
                return;
 
-       drm_pci_free(ring->dev, dev_priv->status_page_dmah);
-       ring->status_page.page_addr = NULL;
+       drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+       engine->status_page.page_addr = NULL;
 }
 
-static void cleanup_status_page(struct intel_engine_cs *ring)
+static void cleanup_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = ring->status_page.obj;
+       obj = engine->status_page.obj;
        if (obj == NULL)
                return;
 
        kunmap(sg_page(obj->pages->sgl));
        i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
-       ring->status_page.obj = NULL;
+       engine->status_page.obj = NULL;
 }
 
-static int init_status_page(struct intel_engine_cs *ring)
+static int init_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_object *obj = ring->status_page.obj;
+       struct drm_i915_gem_object *obj = engine->status_page.obj;
 
        if (obj == NULL) {
                unsigned flags;
                int ret;
 
-               obj = i915_gem_alloc_object(ring->dev, 4096);
+               obj = i915_gem_alloc_object(engine->dev, 4096);
                if (obj == NULL) {
                        DRM_ERROR("Failed to allocate status page\n");
                        return -ENOMEM;
@@ -2011,7 +2016,7 @@ static int init_status_page(struct intel_engine_cs *ring)
                        goto err_unref;
 
                flags = 0;
-               if (!HAS_LLC(ring->dev))
+               if (!HAS_LLC(engine->dev))
                        /* On g33, we cannot place HWS above 256MiB, so
                         * restrict its pinning to the low mappable arena.
                         * Though this restriction is not documented for
@@ -2030,32 +2035,32 @@ err_unref:
                        return ret;
                }
 
-               ring->status_page.obj = obj;
+               engine->status_page.obj = obj;
        }
 
-       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
-       ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+       engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
 
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-                       ring->name, ring->status_page.gfx_addr);
+                       engine->name, engine->status_page.gfx_addr);
 
        return 0;
 }
 
-static int init_phys_status_page(struct intel_engine_cs *ring)
+static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
-                       drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+                       drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }
 
-       ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
 
        return 0;
 }
@@ -2218,37 +2223,38 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
-                                 struct intel_engine_cs *ring)
+                                 struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *ringbuf;
        int ret;
 
-       WARN_ON(ring->buffer);
+       WARN_ON(engine->buffer);
 
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->execlist_queue);
-       INIT_LIST_HEAD(&ring->buffers);
-       i915_gem_batch_pool_init(dev, &ring->batch_pool);
-       memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+       engine->dev = dev;
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
+       INIT_LIST_HEAD(&engine->execlist_queue);
+       INIT_LIST_HEAD(&engine->buffers);
+       i915_gem_batch_pool_init(dev, &engine->batch_pool);
+       memset(engine->semaphore.sync_seqno, 0,
+              sizeof(engine->semaphore.sync_seqno));
 
-       init_waitqueue_head(&ring->irq_queue);
+       init_waitqueue_head(&engine->irq_queue);
 
-       ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+       ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
                ret = PTR_ERR(ringbuf);
                goto error;
        }
-       ring->buffer = ringbuf;
+       engine->buffer = ringbuf;
 
        if (I915_NEED_GFX_HWS(dev)) {
-               ret = init_status_page(ring);
+               ret = init_status_page(engine);
                if (ret)
                        goto error;
        } else {
-               WARN_ON(ring->id != RCS);
-               ret = init_phys_status_page(ring);
+               WARN_ON(engine->id != RCS);
+               ret = init_phys_status_page(engine);
                if (ret)
                        goto error;
        }
@@ -2256,58 +2262,58 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
        if (ret) {
                DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-                               ring->name, ret);
+                               engine->name, ret);
                intel_destroy_ringbuffer_obj(ringbuf);
                goto error;
        }
 
-       ret = i915_cmd_parser_init_ring(ring);
+       ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
 
        return 0;
 
 error:
-       intel_cleanup_ring_buffer(ring);
+       intel_cleanup_ring_buffer(engine);
        return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_cleanup_ring_buffer(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_ring_initialized(engine))
                return;
 
-       dev_priv = to_i915(ring->dev);
+       dev_priv = to_i915(engine->dev);
 
-       if (ring->buffer) {
-               intel_stop_ring_buffer(ring);
-               WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+       if (engine->buffer) {
+               intel_stop_ring_buffer(engine);
+               WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-               intel_unpin_ringbuffer_obj(ring->buffer);
-               intel_ringbuffer_free(ring->buffer);
-               ring->buffer = NULL;
+               intel_unpin_ringbuffer_obj(engine->buffer);
+               intel_ringbuffer_free(engine->buffer);
+               engine->buffer = NULL;
        }
 
-       if (ring->cleanup)
-               ring->cleanup(ring);
+       if (engine->cleanup)
+               engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(ring->dev)) {
-               cleanup_status_page(ring);
+       if (I915_NEED_GFX_HWS(engine->dev)) {
+               cleanup_status_page(engine);
        } else {
-               WARN_ON(ring->id != RCS);
-               cleanup_phys_status_page(ring);
+               WARN_ON(engine->id != RCS);
+               cleanup_phys_status_page(engine);
        }
 
-       i915_cmd_parser_fini_ring(ring);
-       i915_gem_batch_pool_fini(&ring->batch_pool);
-       ring->dev = NULL;
+       i915_cmd_parser_fini_ring(engine);
+       i915_gem_batch_pool_fini(&engine->batch_pool);
+       engine->dev = NULL;
 }
 
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        struct drm_i915_gem_request *request;
        unsigned space;
        int ret;
@@ -2318,14 +2324,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
        /* The whole point of reserving space is to not wait! */
        WARN_ON(ringbuf->reserved_in_use);
 
-       list_for_each_entry(request, &ring->request_list, list) {
+       list_for_each_entry(request, &engine->request_list, list) {
                space = __intel_ring_space(request->postfix, ringbuf->tail,
                                           ringbuf->size);
                if (space >= n)
                        break;
        }
 
-       if (WARN_ON(&request->list == &ring->request_list))
+       if (WARN_ON(&request->list == &engine->request_list))
                return -ENOSPC;
 
        ret = i915_wait_request(request);
@@ -2350,22 +2356,22 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
        intel_ring_update_space(ringbuf);
 }
 
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_ring_idle(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req;
 
        /* Wait upon the last request to be completed */
-       if (list_empty(&ring->request_list))
+       if (list_empty(&engine->request_list))
                return 0;
 
-       req = list_entry(ring->request_list.prev,
-                       struct drm_i915_gem_request,
-                       list);
+       req = list_entry(engine->request_list.prev,
+                        struct drm_i915_gem_request,
+                        list);
 
        /* Make sure we do not trigger any retires */
        return __i915_wait_request(req,
-                                  atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
-                                  to_i915(ring->dev)->mm.interruptible,
+                                  atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
+                                  to_i915(engine->dev)->mm.interruptible,
                                   NULL, NULL);
 }
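
[intel_ring_idle() only waits on the request at the tail of request_list because requests retire in submission order: once the newest request has completed, everything older has too. A standalone sketch of that reasoning, including the wrap-safe comparison the driver uses for seqnos (the i915_seqno_passed() idiom):]

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe seqno comparison, as in i915_seqno_passed(). */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        /* Requests retire in submission order, so once the HW seqno passes
         * the last queued request, every earlier request is complete too. */
        uint32_t hw_seqno = 5, last_queued = 3;
        printf("idle: %d\n", seqno_passed(hw_seqno, last_queued));     /* 1 */

        /* The signed cast keeps the comparison correct across u32 wrap. */
        printf("wrapped: %d\n", seqno_passed(2, 0xfffffffeu));         /* 1 */
        return 0;
}
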
 
@@ -2437,9 +2443,9 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
        ringbuf->reserved_in_use = false;
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        int remain_usable = ringbuf->effective_size - ringbuf->tail;
        int remain_actual = ringbuf->size - ringbuf->tail;
        int ret, total_bytes, wait_bytes = 0;
@@ -2473,7 +2479,7 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
        }
 
        if (wait_bytes) {
-               ret = ring_wait_for_space(ring, wait_bytes);
+               ret = ring_wait_for_space(engine, wait_bytes);
                if (unlikely(ret))
                        return ret;
 
@@ -2531,26 +2537,26 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
        return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
-               I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
-               I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+               I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+               I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                if (HAS_VEBOX(dev))
-                       I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+                       I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
        }
 
-       ring->set_seqno(ring, seqno);
-       ring->hangcheck.seqno = seqno;
+       engine->set_seqno(engine, seqno);
+       engine->hangcheck.seqno = seqno;
 }
 
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                                     u32 value)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        /* Every tail move must follow the sequence below */
 
@@ -2570,8 +2576,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
        /* Now that the ring is fully powered up, update the tail */
-       I915_WRITE_TAIL(ring, value);
-       POSTING_READ(RING_TAIL(ring->mmio_base));
+       I915_WRITE_TAIL(engine, value);
+       POSTING_READ(RING_TAIL(engine->mmio_base));
 
        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
@@ -3157,17 +3163,17 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 }
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_stop_ring_buffer(struct intel_engine_cs *engine)
 {
        int ret;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_ring_initialized(engine))
                return;
 
-       ret = intel_ring_idle(ring);
-       if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+       ret = intel_ring_idle(engine);
+       if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         ring->name, ret);
+                         engine->name, ret);
 
-       stop_ring(ring);
+       stop_ring(engine);
 }
index 24efb57dcd7d862458582d024136c3e28a199e89..48484639c9dac387252914497284082126232f83 100644
@@ -355,19 +355,19 @@ struct  intel_engine_cs {
 };
 
 static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_ring_initialized(struct intel_engine_cs *engine)
 {
-       return ring->dev != NULL;
+       return engine->dev != NULL;
 }
 
 static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_ring_flag(struct intel_engine_cs *engine)
 {
-       return 1 << ring->id;
+       return 1 << engine->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
+intel_ring_sync_index(struct intel_engine_cs *engine,
                      struct intel_engine_cs *other)
 {
        int idx;
@@ -380,7 +380,7 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */
 
-       idx = (other - ring) - 1;
+       idx = (other - engine) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;
 
@@ -388,26 +388,26 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
 }
 
 static inline void
-intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+intel_flush_status_page(struct intel_engine_cs *engine, int reg)
 {
-       drm_clflush_virt_range(&ring->status_page.page_addr[reg],
+       drm_clflush_virt_range(&engine->status_page.page_addr[reg],
                               sizeof(uint32_t));
 }
 
 static inline u32
-intel_read_status_page(struct intel_engine_cs *ring,
+intel_read_status_page(struct intel_engine_cs *engine,
                       int reg)
 {
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
-       return ring->status_page.page_addr[reg];
+       return engine->status_page.page_addr[reg];
 }
 
 static inline void
-intel_write_status_page(struct intel_engine_cs *ring,
+intel_write_status_page(struct intel_engine_cs *engine,
                        int reg, u32 value)
 {
-       ring->status_page.page_addr[reg] = value;
+       engine->status_page.page_addr[reg] = value;
 }
 
 /*
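
A typical consumer of these helpers, as a sketch (assuming I915_GEM_HWS_INDEX, the status-page slot this era of the driver uses for the last completed seqno):

	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);

Note that barrier() only stops the compiler from caching or hoisting the load; coherency with the GPU's write to the status page is handled separately, e.g. by clflushing the line via intel_flush_status_page().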
@@ -438,42 +438,42 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_stop_ring_buffer(struct intel_engine_cs *engine);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
+static inline void intel_ring_emit(struct intel_engine_cs *engine,
                                   u32 data)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
 }
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
                                       i915_reg_t reg)
 {
-       intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+       intel_ring_emit(engine, i915_mmio_reg_offset(reg));
 }
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
 }
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *ring);
+bool intel_ring_stopped(struct intel_engine_cs *engine);
 
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int __must_check intel_ring_idle(struct intel_engine_cs *engine);
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
-void intel_fini_pipe_control(struct intel_engine_cs *ring);
-int intel_init_pipe_control(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
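
The emit helpers above compose into the usual command-emission pattern; a minimal sketch, assuming engine is the engine the request req was allocated on, with error handling trimmed and MI_NOOP as a placeholder payload:

	ret = intel_ring_begin(req, 2);		/* reserve space for 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_NOOP);	/* write a dword at tail, tail += 4 */
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);		/* mask tail back into the ring size */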
@@ -481,9 +481,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
 
-int init_workarounds_ring(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *engine);
 
 static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {