drm/i915: Replace last_[rwf]_seqno with last_[rwf]_req
author John Harrison <John.C.Harrison@Intel.com>
Mon, 24 Nov 2014 18:49:26 +0000 (18:49 +0000)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 3 Dec 2014 08:35:14 +0000 (09:35 +0100)
The object structure contains the last read, write and fenced seqno values for
use in synchronisation operations. These have now been replaced with their
request structure counterparts.

Note that to ensure that objects do not end up with dangling pointers, the
assignments of last_*_req include reference count updates. Thus a request cannot
be freed if an object is still hanging on to it for any reason.

v2: Corrected 'last_rendering_' to 'last_read_' in a number of comments that did
not get updated when 'last_rendering_seqno' became 'last_read|write_seqno'
several millennia ago.

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index a47fc25e6bb446c56de0e4ae5c5ffb7b1c9c56d6..4619873f96202e84d2e36d7f510fd496a573dd26 100644 (file)
@@ -131,9 +131,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
-                  obj->last_read_seqno,
-                  obj->last_write_seqno,
-                  obj->last_fenced_seqno,
+                  i915_gem_request_get_seqno(obj->last_read_req),
+                  i915_gem_request_get_seqno(obj->last_write_req),
+                  i915_gem_request_get_seqno(obj->last_fenced_req),
                   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
index 48c0c4a60364d6fdcbb5f7d6fbed7c9c85bb45cc..4924f1d3d4b415b8099d29267f07d379a20ee54e 100644 (file)
@@ -1944,10 +1944,10 @@ struct drm_i915_gem_object {
        struct intel_engine_cs *ring;
 
        /** Breadcrumb of last rendering to the buffer. */
-       uint32_t last_read_seqno;
-       uint32_t last_write_seqno;
+       struct drm_i915_gem_request *last_read_req;
+       struct drm_i915_gem_request *last_write_req;
        /** Breadcrumb of last fenced GPU access to the buffer. */
-       uint32_t last_fenced_seqno;
+       struct drm_i915_gem_request *last_fenced_req;
 
        /** Current tiling stride for the object, if it's tiled. */
        uint32_t stride;
@@ -1986,9 +1986,10 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
  * The request queue allows us to note sequence numbers that have been emitted
  * and may be associated with active buffers to be retired.
  *
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
  */
 struct drm_i915_gem_request {
        struct kref ref;
index ef45e2eac95230f9c8ec801ad598c2c3805e8a19..a1110fb7e583b66544801e389cd7d2cbbb7813c4 100644 (file)
@@ -1346,11 +1346,11 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
-        * Note that the last_write_seqno is always the earlier of
-        * the two (read/write) seqno, so if we haved successfully waited,
+        * Note that the last_write_req is always the earlier of
+        * the two (read/write) requests, so if we have successfully waited,
         * we know we have passed the last write.
         */
-       obj->last_write_seqno = 0;
+       i915_gem_request_assign(&obj->last_write_req, NULL);
 
        return 0;
 }
@@ -1363,14 +1363,18 @@ static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
+       struct drm_i915_gem_request *req;
        struct intel_engine_cs *ring = obj->ring;
        u32 seqno;
        int ret;
 
-       seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-       if (seqno == 0)
+       req = readonly ? obj->last_write_req : obj->last_read_req;
+       if (!req)
                return 0;
 
+       seqno = i915_gem_request_get_seqno(req);
+       WARN_ON(seqno == 0);
+
        ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;
@@ -1386,6 +1390,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            struct drm_i915_file_private *file_priv,
                                            bool readonly)
 {
+       struct drm_i915_gem_request *req;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = obj->ring;
@@ -1396,10 +1401,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);
 
-       seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-       if (seqno == 0)
+       req = readonly ? obj->last_write_req : obj->last_read_req;
+       if (!req)
                return 0;
 
+       seqno = i915_gem_request_get_seqno(req);
+       WARN_ON(seqno == 0);
+
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
@@ -2257,12 +2265,12 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *ring)
 {
-       u32 seqno = intel_ring_get_seqno(ring);
+       struct drm_i915_gem_request *req = intel_ring_get_request(ring);
 
        BUG_ON(ring == NULL);
-       if (obj->ring != ring && obj->last_write_seqno) {
-               /* Keep the seqno relative to the current ring */
-               obj->last_write_seqno = seqno;
+       if (obj->ring != ring && obj->last_write_req) {
+               /* Keep the request relative to the current ring */
+               i915_gem_request_assign(&obj->last_write_req, req);
        }
        obj->ring = ring;
 
@@ -2274,7 +2282,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 
        list_move_tail(&obj->ring_list, &ring->active_list);
 
-       obj->last_read_seqno = seqno;
+       i915_gem_request_assign(&obj->last_read_req, req);
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2305,11 +2313,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
 
-       obj->last_read_seqno = 0;
-       obj->last_write_seqno = 0;
+       i915_gem_request_assign(&obj->last_read_req, NULL);
+       i915_gem_request_assign(&obj->last_write_req, NULL);
        obj->base.write_domain = 0;
 
-       obj->last_fenced_seqno = 0;
+       i915_gem_request_assign(&obj->last_fenced_req, NULL);
 
        obj->active = 0;
        drm_gem_object_unreference(&obj->base);
@@ -2326,7 +2334,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
                return;
 
        if (i915_seqno_passed(ring->get_seqno(ring, true),
-                             obj->last_read_seqno))
+                             i915_gem_request_get_seqno(obj->last_read_req)))
                i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2753,7 +2761,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                                      struct drm_i915_gem_object,
                                      ring_list);
 
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+               if (!i915_seqno_passed(seqno,
+                            i915_gem_request_get_seqno(obj->last_read_req)))
                        break;
 
                i915_gem_object_move_to_inactive(obj);
@@ -2872,7 +2881,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
        int ret;
 
        if (obj->active) {
-               ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+               ret = i915_gem_check_olr(obj->ring,
+                            i915_gem_request_get_seqno(obj->last_read_req));
                if (ret)
                        return ret;
 
@@ -2933,13 +2943,12 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (ret)
                goto out;
 
-       if (obj->active) {
-               seqno = obj->last_read_seqno;
-               ring = obj->ring;
-       }
+       if (!obj->active || !obj->last_read_req)
+               goto out;
 
-       if (seqno == 0)
-                goto out;
+       seqno = i915_gem_request_get_seqno(obj->last_read_req);
+       WARN_ON(seqno == 0);
+       ring = obj->ring;
 
        /* Do this after OLR check to make sure we make forward progress polling
         * on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2990,7 +2999,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
        idx = intel_ring_sync_index(from, to);
 
-       seqno = obj->last_read_seqno;
+       seqno = i915_gem_request_get_seqno(obj->last_read_req);
        /* Optimization: Avoid semaphore sync when we are sure we already
         * waited for an object with higher seqno */
        if (seqno <= from->semaphore.sync_seqno[idx])
@@ -3003,11 +3012,12 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
        trace_i915_gem_ring_sync_to(from, to, seqno);
        ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
-               /* We use last_read_seqno because sync_to()
+               /* We use last_read_req because sync_to()
                 * might have just caused seqno wrap under
                 * the radar.
                 */
-               from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+               from->semaphore.sync_seqno[idx] =
+                               i915_gem_request_get_seqno(obj->last_read_req);
 
        return ret;
 }
@@ -3321,12 +3331,13 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-       if (obj->last_fenced_seqno) {
-               int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+       if (obj->last_fenced_req) {
+               int ret = i915_wait_seqno(obj->ring,
+                          i915_gem_request_get_seqno(obj->last_fenced_req));
                if (ret)
                        return ret;
 
-               obj->last_fenced_seqno = 0;
+               i915_gem_request_assign(&obj->last_fenced_req, NULL);
        }
 
        return 0;
index f06027ba3ee5512a7718deb22112f9fe6e16458b..4d9baef28e309a62de8b5a2d03dee84676f358a2 100644 (file)
@@ -946,7 +946,7 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_engine_cs *ring)
 {
-       u32 seqno = intel_ring_get_seqno(ring);
+       struct drm_i915_gem_request *req = intel_ring_get_request(ring);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
-                       obj->last_write_seqno = seqno;
+                       i915_gem_request_assign(&obj->last_write_req, req);
 
                        intel_fb_obj_invalidate(obj, ring);
 
@@ -971,7 +971,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
                }
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                       obj->last_fenced_seqno = seqno;
+                       i915_gem_request_assign(&obj->last_fenced_req, req);
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                                struct drm_i915_private *dev_priv = to_i915(ring->dev);
                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
index 92db6654f93ba46e1d34bebd2949f36ad3d4d3b9..dd849df6a268635f6136d8faca5e332a622465c5 100644 (file)
@@ -181,7 +181,7 @@ struct i915_address_space {
         * List of objects currently involved in rendering.
         *
         * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives.  last_rendering_seqno
+        * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
@@ -192,7 +192,7 @@ struct i915_address_space {
         * LRU list of objects which are not in the ringbuffer and
         * are ready to unbind, but are still in the GTT.
         *
-        * last_rendering_seqno is 0 while an object is in this list.
+        * last_read_req is NULL while an object is in this list.
         *
         * A reference is not held on the buffer while on this list,
         * as merely being GTT-bound shouldn't prevent its being
index 4727a4e2c87c98669c03c07a526723b8dcec49a9..7a24bd1a51f648b340ce15d5ee98771cd1fd77d9 100644 (file)
@@ -399,7 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                        }
 
                        obj->fence_dirty =
-                               obj->last_fenced_seqno ||
+                               obj->last_fenced_req ||
                                obj->fence_reg != I915_FENCE_REG_NONE;
 
                        obj->tiling_mode = args->tiling_mode;
index eea98d56743107cdf596b9ce5f3a8b5b8a17888a..af0ceeedda9bda93da7c0be024120a909f691ea1 100644 (file)
@@ -670,8 +670,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 
        err->size = obj->base.size;
        err->name = obj->base.name;
-       err->rseqno = obj->last_read_seqno;
-       err->wseqno = obj->last_write_seqno;
+       err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+       err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
        err->gtt_offset = vma->node.start;
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
index d54716655d3240ca28581fad1e67499e0bf35f7e..70e75805994bcf396a0d27abaf5856724c1f67f6 100644 (file)
@@ -9637,7 +9637,8 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+       intel_crtc->mmio_flip.seqno =
+                            i915_gem_request_get_seqno(obj->last_write_req);
        intel_crtc->mmio_flip.ring = obj->ring;
 
        schedule_work(&intel_crtc->mmio_flip.work);
@@ -9900,7 +9901,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                if (ret)
                        goto cleanup_unpin;
 
-               work->flip_queued_seqno = obj->last_write_seqno;
+               work->flip_queued_seqno =
+                            i915_gem_request_get_seqno(obj->last_write_req);
                work->flip_queued_ring = obj->ring;
        } else {
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
index 20636e0e9b53b27fa0e86330928e401f61384bb8..dbac132a5cdf568d124d3c1fbfedcc267195abc7 100644 (file)
@@ -251,7 +251,7 @@ struct  intel_engine_cs {
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives.  last_rendering_seqno
+        * flushed, not necessarily primitives.  last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.