drm/i915: Use HWS for seqno tracking everywhere
author	Chris Wilson <chris@chris-wilson.co.uk>
Fri, 1 Jul 2016 16:23:17 +0000 (17:23 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
Fri, 1 Jul 2016 19:58:48 +0000 (20:58 +0100)
By using the same address for storing the HWS on every platform, we can
remove the platform specific vfuncs and reduce the get-seqno routine to
a single read of a cached memory location.

v2: Fix semaphore_passed() to look at the signaling engine (not the
waiter's)
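
For reference, every caller now goes through the single helper added to
intel_ringbuffer.h below, which on all platforms reduces to one read of the
hardware status page:

    static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
    {
            /* HWS_INDEX is the common seqno slot in the status page */
            return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
    }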

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467390209-3576-8-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 309bb2f1a1e86b1d6b4bd9499a5f1e47c300c766..f5899b631c0ee8fb3f59ccd68d3bd8c9e9dd4a61 100644 (file)
@@ -662,7 +662,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          engine->get_seqno(engine),
+                                          intel_engine_get_seqno(engine),
                                           i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
@@ -792,7 +792,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
        struct rb_node *rb;
 
        seq_printf(m, "Current sequence (%s): %x\n",
-                  engine->name, engine->get_seqno(engine));
+                  engine->name, intel_engine_get_seqno(engine));
        seq_printf(m, "Current user interrupts (%s): %x\n",
                   engine->name, READ_ONCE(engine->user_interrupts));
 
@@ -1420,7 +1420,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
        for_each_engine_id(engine, dev_priv, id) {
                acthd[id] = intel_ring_get_active_head(engine);
-               seqno[id] = engine->get_seqno(engine);
+               seqno[id] = intel_engine_get_seqno(engine);
        }
 
        i915_get_extra_instdone(dev_priv, instdone);
index 0ea69c5ecc8b1100b86d7af5991b89cd3b852049..5a1e8e056ee53b5e41c9a315cd5f4ebb3d0e9957 100644 (file)
@@ -3289,13 +3289,13 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->seqno);
 }
 
index 250f0b8180990369b85a77c9aace12dd2fcac2f0..1c15a74530cd1638b7a53c914c521eba701cd218 100644 (file)
@@ -984,7 +984,7 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
        ering->waiting = intel_engine_has_waiter(engine);
        ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
        ering->acthd = intel_ring_get_active_head(engine);
-       ering->seqno = engine->get_seqno(engine);
+       ering->seqno = intel_engine_get_seqno(engine);
        ering->last_seqno = engine->last_submitted_seqno;
        ering->start = I915_READ_START(engine);
        ering->head = I915_READ_HEAD(engine);
index a11ab00cdee0900a5a09884008202d94a0447ebb..7c379afcff2f7ea74b6ae5f87ff86ea74cff1afb 100644 (file)
@@ -2952,7 +2952,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
 
-       if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+       if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
@@ -3140,7 +3140,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        engine->irq_seqno_barrier(engine);
 
                acthd = intel_ring_get_active_head(engine);
-               seqno = engine->get_seqno(engine);
+               seqno = intel_engine_get_seqno(engine);
 
                /* Reset stuck interrupts between batch advances */
                user_interrupts = 0;
index 6768db032f848cc7446c6c42d2484dc5f7d7ad2c..3d13fde95fdfbb7c656ca5bb12747966577b4a7b 100644 (file)
@@ -558,7 +558,7 @@ TRACE_EVENT(i915_gem_request_notify,
            TP_fast_assign(
                           __entry->dev = engine->i915->dev->primary->index;
                           __entry->ring = engine->id;
-                          __entry->seqno = engine->get_seqno(engine);
+                          __entry->seqno = intel_engine_get_seqno(engine);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
index a3bbf2d90dce2d7739a285d42b14102ef328e680..7cdb02d18c1f3f99dea29915ecbf192bdc815bce 100644 (file)
@@ -146,7 +146,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
        first = true;
        parent = NULL;
        completed = NULL;
-       seqno = engine->get_seqno(engine);
+       seqno = intel_engine_get_seqno(engine);
 
         /* If the request completed before we managed to grab the spinlock,
          * return now before adding ourselves to the rbtree. We let the
@@ -296,7 +296,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
-                       u32 seqno = engine->get_seqno(engine);
+                       u32 seqno = intel_engine_get_seqno(engine);
 
                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);
index 226bba22e4b4a53a001f6b30f4be00e2e6d05caa..c8411f8bb4acb2c971ffe0cdd5bd0640ad9168a9 100644 (file)
@@ -1783,16 +1783,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 {
        /*
@@ -1808,14 +1798,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
        intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
-       /* See bxt_a_get_seqno() explaining the reason for the clflush. */
-       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -1841,7 +1823,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
                                intel_hws_seqno_address(request->engine) |
                                MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ringbuf, request->seqno);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
@@ -1987,12 +1969,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->irq_get = gen8_logical_ring_get_irq;
        engine->irq_put = gen8_logical_ring_put_irq;
        engine->emit_bb_start = gen8_emit_bb_start;
-       engine->get_seqno = gen8_get_seqno;
-       engine->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
                engine->irq_seqno_barrier = bxt_a_seqno_barrier;
-               engine->set_seqno = bxt_a_set_seqno;
-       }
 }
 
 static inline void
index af50aa01bcd92bf216f15410ae08dccef37fd2fa..02104fbf9045e402a9a8c6401a60e6d833428e7a 100644 (file)
@@ -1367,19 +1367,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
                                           PIPE_CONTROL_CS_STALL);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1408,18 +1406,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
@@ -1450,11 +1446,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, seqno);
+                       intel_ring_emit(signaller, signaller_req->seqno);
                }
        }
 
@@ -1490,7 +1484,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
@@ -1628,7 +1622,9 @@ static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 addr = engine->status_page.gfx_addr +
+               (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       u32 scratch_addr = addr;
        int ret;
 
        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1644,12 +1640,12 @@ pc_render_add_request(struct drm_i915_gem_request *req)
                return ret;
 
        intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+                       GFX_OP_PIPE_CONTROL(4) |
+                       PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, 0);
        PIPE_CONTROL_FLUSH(engine, scratch_addr);
        scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1668,9 +1664,8 @@ pc_render_add_request(struct drm_i915_gem_request *req)
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, 0);
        __intel_ring_advance(engine);
 
@@ -1702,30 +1697,6 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static u32
-ring_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void
-ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *engine)
-{
-       return engine->scratch.cpu_page[0];
-}
-
-static void
-pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       engine->scratch.cpu_page[0] = seqno;
-}
-
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
@@ -1856,7 +1827,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
@@ -2675,7 +2646,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       engine->set_seqno(engine, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
        engine->last_submitted_seqno = seqno;
 
        engine->hangcheck.seqno = seqno;
@@ -3021,8 +2994,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 {
        engine->init_hw = init_ring_common;
        engine->write_tail = ring_write_tail;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
 
        engine->add_request = i9xx_add_request;
        if (INTEL_GEN(dev_priv) >= 6)
@@ -3074,8 +3045,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        } else if (IS_GEN5(dev_priv)) {
                engine->add_request = pc_render_add_request;
                engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = pc_render_get_seqno;
-               engine->set_seqno = pc_render_set_seqno;
                engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
                                        GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
        } else {
index 6fd70a56e219b1b98dc4468c485096b70155878a..b03931f6dde530dd3867277229d6f4d02ce787c5 100644 (file)
@@ -207,9 +207,6 @@ struct intel_engine_cs {
         * monotonic, even if not coherent.
         */
        void            (*irq_seqno_barrier)(struct intel_engine_cs *ring);
-       u32             (*get_seqno)(struct intel_engine_cs *ring);
-       void            (*set_seqno)(struct intel_engine_cs *ring,
-                                    u32 seqno);
        int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                               u64 offset, u32 length,
                                               unsigned dispatch_flags);
@@ -485,6 +482,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
 
 int init_workarounds_ring(struct intel_engine_cs *engine);