drm/i915: Combine seqno + tracking into a global timeline struct
author Chris Wilson <chris@chris-wilson.co.uk>
Fri, 28 Oct 2016 12:58:46 +0000 (13:58 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Fri, 28 Oct 2016 19:53:51 +0000 (20:53 +0100)
Our timelines are more than just a seqno. They also provide an ordered
list of requests to be executed. Due to the restriction of handling
individual address spaces, we are limited to a timeline per address
space but we use a fence context per engine within.

Our first step to introducing independent timelines per context (i.e. to
allow each context to have a queue of requests to execute that have a
defined set of dependencies on other requests) is to provide a timeline
abstraction for the global execution queue.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-23-chris@chris-wilson.co.uk
15 files changed:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/i915_gem_timeline.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_timeline.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 7faa04c91e1af8d93d2bbadf7b52f6f40583c086..240ce9a8d68ef773f08bfa69772486178d861264 100644 (file)
@@ -42,6 +42,7 @@ i915-y += i915_cmd_parser.o \
          i915_gem_shrinker.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
+         i915_gem_timeline.o \
          i915_gem_userptr.o \
          i915_trace_points.o \
          intel_breadcrumbs.o \
index b6325065c8e8665c16b996b498173e971c9030f4..3a0ea5eace3771962413177c669caf4ce4ad3602 100644 (file)
@@ -552,7 +552,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
                                           engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
-                                          dev_priv->next_seqno,
+                                          dev_priv->gt.global_timeline.next_seqno,
                                           intel_engine_get_seqno(engine),
                                           i915_gem_request_completed(work->flip_queued_req));
                        } else
@@ -662,13 +662,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                int count;
 
                count = 0;
-               list_for_each_entry(req, &engine->request_list, link)
+               list_for_each_entry(req, &engine->timeline->requests, link)
                        count++;
                if (count == 0)
                        continue;
 
                seq_printf(m, "%s requests: %d\n", engine->name, count);
-               list_for_each_entry(req, &engine->request_list, link)
+               list_for_each_entry(req, &engine->timeline->requests, link)
                        print_request(m, req, "    ");
 
                any++;
@@ -1052,15 +1052,8 @@ static int
 i915_next_seqno_get(void *data, u64 *val)
 {
        struct drm_i915_private *dev_priv = data;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
-       if (ret)
-               return ret;
-
-       *val = dev_priv->next_seqno;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       *val = READ_ONCE(dev_priv->gt.global_timeline.next_seqno);
        return 0;
 }
 
@@ -1075,7 +1068,7 @@ i915_next_seqno_set(void *data, u64 val)
        if (ret)
                return ret;
 
-       ret = i915_gem_set_seqno(dev, val);
+       ret = i915_gem_set_global_seqno(dev, val);
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -1364,7 +1357,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
                           engine->hangcheck.seqno,
                           seqno[id],
-                          engine->last_submitted_seqno);
+                          engine->timeline->last_submitted_seqno);
                seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
@@ -3181,7 +3174,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                seq_printf(m, "%s\n", engine->name);
                seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
                           intel_engine_get_seqno(engine),
-                          engine->last_submitted_seqno,
+                          engine->timeline->last_submitted_seqno,
                           engine->hangcheck.seqno,
                           engine->hangcheck.score);
 
@@ -3189,14 +3182,14 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 
                seq_printf(m, "\tRequests:\n");
 
-               rq = list_first_entry(&engine->request_list,
-                               struct drm_i915_gem_request, link);
-               if (&rq->link != &engine->request_list)
+               rq = list_first_entry(&engine->timeline->requests,
+                                     struct drm_i915_gem_request, link);
+               if (&rq->link != &engine->timeline->requests)
                        print_request(m, rq, "\t\tfirst  ");
 
-               rq = list_last_entry(&engine->request_list,
-                               struct drm_i915_gem_request, link);
-               if (&rq->link != &engine->request_list)
+               rq = list_last_entry(&engine->timeline->requests,
+                                    struct drm_i915_gem_request, link);
+               if (&rq->link != &engine->timeline->requests)
                        print_request(m, rq, "\t\tlast   ");
 
                rq = i915_gem_find_active_request(engine);
index 91cd7b296c0f6acad5d225f7d2ad4368aa698564..839ce2ae38fa90df46045d7d05070ccec2b4ead8 100644 (file)
@@ -831,7 +831,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_init_display_hooks(dev_priv);
        intel_init_clock_gating_hooks(dev_priv);
        intel_init_audio_hooks(dev_priv);
-       i915_gem_load_init(&dev_priv->drm);
+       ret = i915_gem_load_init(&dev_priv->drm);
+       if (ret < 0)
+               goto err_gvt;
 
        intel_display_crc_init(dev_priv);
 
@@ -841,6 +843,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
        return 0;
 
+err_gvt:
+       intel_gvt_cleanup(dev_priv);
 err_workqueues:
        i915_workqueues_cleanup(dev_priv);
        return ret;
index 693ee69a4715bc0f6b51fcf914dab14a1f5197f8..f0f68f64d09caa6e585b86964f409b2651cc6574 100644 (file)
@@ -63,6 +63,7 @@
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
 #include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
 
 #include "intel_gvt.h"
 
@@ -1830,7 +1831,6 @@ struct drm_i915_private {
        struct i915_gem_context *kernel_context;
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
        struct i915_vma *semaphore;
-       u32 next_seqno;
 
        struct drm_dma_handle *status_page_dmah;
        struct resource mch_res;
@@ -2090,6 +2090,9 @@ struct drm_i915_private {
                void (*resume)(struct drm_i915_private *);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
 
+               struct list_head timelines;
+               struct i915_gem_timeline global_timeline;
+
                /**
                 * Is the GPU currently considered idle, or busy executing
                 * userspace requests? Whilst idle, we allow runtime power
@@ -3175,7 +3178,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -3347,7 +3350,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits);
 
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
index 1161a21ec81065cc3bac9059fa0435aa28e7e35f..525360219bbb75debd055919436157e08f264c50 100644 (file)
@@ -371,7 +371,7 @@ out:
        if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
                i915_gem_request_retire_upto(rq);
 
-       if (rps && rq->fence.seqno == rq->engine->last_submitted_seqno) {
+       if (rps && rq->fence.seqno == rq->timeline->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
@@ -2563,7 +2563,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
         * extra delay for a recent interrupt is pointless. Hence, we do
         * not need an engine->irq_seqno_barrier() before the seqno reads.
         */
-       list_for_each_entry(request, &engine->request_list, link) {
+       list_for_each_entry(request, &engine->timeline->requests, link) {
                if (i915_gem_request_completed(request))
                        continue;
 
@@ -2632,7 +2632,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        if (i915_gem_context_is_default(incomplete_ctx))
                return;
 
-       list_for_each_entry_continue(request, &engine->request_list, link)
+       list_for_each_entry_continue(request, &engine->timeline->requests, link)
                if (request->ctx == incomplete_ctx)
                        reset_request(request);
 }
@@ -2671,7 +2671,8 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
         * (lockless) lookup doesn't try and wait upon the request as we
         * reset it.
         */
-       intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+       intel_engine_init_global_seqno(engine,
+                                      engine->timeline->last_submitted_seqno);
 
        /*
         * Clear the execlists queue up before freeing the requests, as those
@@ -2979,18 +2980,26 @@ destroy:
        return 0;
 }
 
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int ret;
+       int ret, i;
 
-       for_each_engine(engine, dev_priv, id) {
-               if (engine->last_context == NULL)
-                       continue;
+       for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+               ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
+               if (ret)
+                       return ret;
+       }
 
-               ret = intel_engine_idle(engine, flags);
+       return 0;
+}
+
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+{
+       struct i915_gem_timeline *tl;
+       int ret;
+
+       list_for_each_entry(tl, &i915->gt.timelines, link) {
+               ret = wait_for_timeline(tl, flags);
                if (ret)
                        return ret;
        }
@@ -4680,21 +4689,32 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
        i915_gem_detect_bit_6_swizzle(dev);
 }
 
-void
+int
 i915_gem_load_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       int err;
 
        dev_priv->objects =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
+       if (!dev_priv->objects) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+
        dev_priv->vmas =
                kmem_cache_create("i915_gem_vma",
                                  sizeof(struct i915_vma), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
+       if (!dev_priv->vmas) {
+               err = -ENOMEM;
+               goto err_objects;
+       }
+
        dev_priv->requests =
                kmem_cache_create("i915_gem_request",
                                  sizeof(struct drm_i915_gem_request), 0,
@@ -4702,6 +4722,19 @@ i915_gem_load_init(struct drm_device *dev)
                                  SLAB_RECLAIM_ACCOUNT |
                                  SLAB_DESTROY_BY_RCU,
                                  NULL);
+       if (!dev_priv->requests) {
+               err = -ENOMEM;
+               goto err_vmas;
+       }
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       INIT_LIST_HEAD(&dev_priv->gt.timelines);
+       err = i915_gem_timeline_init(dev_priv,
+                                    &dev_priv->gt.global_timeline,
+                                    "[execution]");
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       if (err)
+               goto err_requests;
 
        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4726,6 +4759,17 @@ i915_gem_load_init(struct drm_device *dev)
        atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
 
        spin_lock_init(&dev_priv->fb_tracking.lock);
+
+       return 0;
+
+err_requests:
+       kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+       kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+       kmem_cache_destroy(dev_priv->objects);
+err_out:
+       return err;
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
index 8292e797d9b57dc91775c4a8d731775828225eee..735580d72eb11d1e4f9ef7ed1cac42d1afae73ad 100644 (file)
@@ -31,4 +31,6 @@
 #define GEM_BUG_ON(expr)
 #endif
 
+#define I915_NUM_ENGINES 5
+
 #endif /* __I915_GEM_H__ */
index 01a7fa513b4a7229a61e80795b415261729484a8..16d38f87f0a7a992b366e955ae28822fdc8cb183 100644 (file)
@@ -40,7 +40,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
         * multiple execution contexts (fence contexts) as we allow
         * engines within a single timeline to execute in parallel.
         */
-       return "global";
+       return to_request(fence)->timeline->common->name;
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -211,7 +211,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
                return;
 
        do {
-               tmp = list_first_entry(&engine->request_list,
+               tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);
 
                i915_gem_request_retire(tmp);
@@ -238,37 +238,39 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *dev_priv,
+                                     u32 seqno)
 {
+       struct i915_gem_timeline *timeline = &dev_priv->gt.global_timeline;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;
 
        /* Carefully retire all requests without writing to the rings */
-       for_each_engine(engine, dev_priv, id) {
-               ret = intel_engine_idle(engine,
-                                       I915_WAIT_INTERRUPTIBLE |
-                                       I915_WAIT_LOCKED);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
+       if (ret)
+               return ret;
+
        i915_gem_retire_requests(dev_priv);
 
        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+       if (!i915_seqno_passed(seqno, timeline->next_seqno)) {
                while (intel_kick_waiters(dev_priv) ||
                       intel_kick_signalers(dev_priv))
                        yield();
+               yield();
        }
 
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv, id)
-               intel_engine_init_seqno(engine, seqno);
+               intel_engine_init_global_seqno(engine, seqno);
 
        return 0;
 }
 
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
@@ -281,28 +283,31 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+       ret = i915_gem_init_global_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
 
-       dev_priv->next_seqno = seqno;
+       dev_priv->gt.global_timeline.next_seqno = seqno;
        return 0;
 }
 
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static int i915_gem_get_global_seqno(struct drm_i915_private *dev_priv,
+                                    u32 *seqno)
 {
+       struct i915_gem_timeline *tl = &dev_priv->gt.global_timeline;
+
        /* reserve 0 for non-seqno */
-       if (unlikely(dev_priv->next_seqno == 0)) {
+       if (unlikely(tl->next_seqno == 0)) {
                int ret;
 
-               ret = i915_gem_init_seqno(dev_priv, 0);
+               ret = i915_gem_init_global_seqno(dev_priv, 0);
                if (ret)
                        return ret;
 
-               dev_priv->next_seqno = 1;
+               tl->next_seqno = 1;
        }
 
-       *seqno = dev_priv->next_seqno++;
+       *seqno = tl->next_seqno++;
        return 0;
 }
 
@@ -311,13 +316,14 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);
+       struct intel_engine_cs *engine = request->engine;
 
        /* Will be called from irq-context when using foreign DMA fences */
 
        switch (state) {
        case FENCE_COMPLETE:
-               request->engine->last_submitted_seqno = request->fence.seqno;
-               request->engine->submit_request(request);
+               engine->timeline->last_submitted_seqno = request->fence.seqno;
+               engine->submit_request(request);
                break;
 
        case FENCE_FREE:
@@ -357,7 +363,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                return ERR_PTR(ret);
 
        /* Move the oldest request to the slab-cache (if not in use!) */
-       req = list_first_entry_or_null(&engine->request_list,
+       req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);
@@ -394,15 +400,17 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (!req)
                return ERR_PTR(-ENOMEM);
 
-       ret = i915_gem_get_seqno(dev_priv, &seqno);
+       ret = i915_gem_get_global_seqno(dev_priv, &seqno);
        if (ret)
                goto err;
 
+       req->timeline = engine->timeline;
+
        spin_lock_init(&req->lock);
        dma_fence_init(&req->fence,
                       &i915_fence_ops,
                       &req->lock,
-                      engine->fence_context,
+                      req->timeline->fence_context,
                       seqno);
 
        i915_sw_fence_init(&req->submit, submit_notify);
@@ -457,9 +465,16 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 
        GEM_BUG_ON(to == from);
 
-       if (to->engine == from->engine)
+       if (to->timeline == from->timeline)
                return 0;
 
+       if (to->engine == from->engine) {
+               ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+                                                      &from->submit,
+                                                      GFP_KERNEL);
+               return ret < 0 ? ret : 0;
+       }
+
        idx = intel_engine_sync_index(from->engine, to->engine);
        if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
                return 0;
@@ -622,6 +637,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
+       struct intel_timeline *timeline = request->timeline;
        struct drm_i915_gem_request *prev;
        u32 request_start;
        u32 reserved_tail;
@@ -679,17 +695,17 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         * see a more recent value in the hws than we are tracking.
         */
 
-       prev = i915_gem_active_raw(&engine->last_request,
+       prev = i915_gem_active_raw(&timeline->last_request,
                                   &request->i915->drm.struct_mutex);
        if (prev)
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
 
        request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_pending_seqno;
-       engine->last_pending_seqno = request->fence.seqno;
-       i915_gem_active_set(&engine->last_request, request);
-       list_add_tail(&request->link, &engine->request_list);
+       request->previous_seqno = timeline->last_pending_seqno;
+       timeline->last_pending_seqno = request->fence.seqno;
+       i915_gem_active_set(&timeline->last_request, request);
+       list_add_tail(&request->link, &timeline->requests);
        list_add_tail(&request->ring_link, &ring->request_list);
 
        i915_gem_mark_busy(engine);
@@ -899,7 +915,8 @@ static bool engine_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request, *next;
 
-       list_for_each_entry_safe(request, next, &engine->request_list, link) {
+       list_for_each_entry_safe(request, next,
+                                &engine->timeline->requests, link) {
                if (!i915_gem_request_completed(request))
                        return false;
 
index a51d596a60acb2335dbefbcb1faa16fd12f34042..4ac30ae93e499c6cb06c5c6a049a63a2dce220f7 100644 (file)
@@ -81,6 +81,7 @@ struct drm_i915_gem_request {
        struct i915_gem_context *ctx;
        struct intel_engine_cs *engine;
        struct intel_ring *ring;
+       struct intel_timeline *timeline;
        struct intel_signal_node signaling;
 
        struct i915_sw_fence submit;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
new file mode 100644 (file)
index 0000000..a1bd03d
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+                          struct i915_gem_timeline *timeline,
+                          const char *name)
+{
+       unsigned int i;
+       u64 fences;
+
+       lockdep_assert_held(&i915->drm.struct_mutex);
+
+       timeline->i915 = i915;
+       timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+       if (!timeline->name)
+               return -ENOMEM;
+
+       list_add(&timeline->link, &i915->gt.timelines);
+
+       /* Called during early_init before we know how many engines there are */
+       fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+       for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+               struct intel_timeline *tl = &timeline->engine[i];
+
+               tl->fence_context = fences++;
+               tl->common = timeline;
+
+               init_request_active(&tl->last_request, NULL);
+               INIT_LIST_HEAD(&tl->requests);
+       }
+
+       return 0;
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+       lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+       list_del(&tl->link);
+       kfree(tl->name);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
new file mode 100644 (file)
index 0000000..bfdf033
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+       u64 fence_context;
+       u32 last_submitted_seqno;
+       u32 last_pending_seqno;
+
+       /**
+        * List of breadcrumbs associated with GPU requests currently
+        * outstanding.
+        */
+       struct list_head requests;
+
+       /* Contains an RCU guarded pointer to the last request. No reference is
+        * held to the request, users must carefully acquire a reference to
+        * the request using i915_gem_active_get_request_rcu(), or hold the
+        * struct_mutex.
+        */
+       struct i915_gem_active last_request;
+
+       struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+       struct list_head link;
+       u32 next_seqno;
+
+       struct drm_i915_private *i915;
+       const char *name;
+
+       struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+                          struct i915_gem_timeline *tl,
+                          const char *name);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif
index aa8dadcc669f324b790e0fa8689757c1e45151e6..12fea57d41fb7c6254066030542227e0aca2b7ee 100644 (file)
@@ -1110,7 +1110,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
        ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
        ee->acthd = intel_engine_get_active_head(engine);
        ee->seqno = intel_engine_get_seqno(engine);
-       ee->last_seqno = engine->last_submitted_seqno;
+       ee->last_seqno = engine->timeline->last_submitted_seqno;
        ee->start = I915_READ_START(engine);
        ee->head = I915_READ_HEAD(engine);
        ee->tail = I915_READ_TAIL(engine);
@@ -1195,7 +1195,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
        count = 0;
        request = first;
-       list_for_each_entry_from(request, &engine->request_list, link)
+       list_for_each_entry_from(request, &engine->timeline->requests, link)
                count++;
        if (!count)
                return;
@@ -1208,7 +1208,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
        count = 0;
        request = first;
-       list_for_each_entry_from(request, &engine->request_list, link) {
+       list_for_each_entry_from(request, &engine->timeline->requests, link) {
                if (count >= ee->num_requests) {
                        /*
                         * If the ring request list was changed in
index 74235ea3950f24347728ba4d740640ab3186e1f1..cca250e90845516225583a931db0911bcccd96ba 100644 (file)
@@ -1522,7 +1522,8 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
                engine->submit_request = i915_guc_submit;
 
                /* Replay the current set of previously submitted requests */
-               list_for_each_entry(request, &engine->request_list, link) {
+               list_for_each_entry(request,
+                                   &engine->timeline->requests, link) {
                        client->wq_rsvd += sizeof(struct guc_wq_item);
                        if (i915_sw_fence_done(&request->submit))
                                i915_guc_submit(request);
index 88239e1b29e48ddc707c65ca8a31dec0de1b0f10..90d0905592f2861e7abd7752fa197162b576fa8e 100644 (file)
@@ -3168,7 +3168,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
                acthd = intel_engine_get_active_head(engine);
                seqno = intel_engine_get_seqno(engine);
-               submit = READ_ONCE(engine->last_submitted_seqno);
+               submit = READ_ONCE(engine->timeline->last_submitted_seqno);
 
                if (engine->hangcheck.seqno == seqno) {
                        if (i915_seqno_passed(seqno, submit)) {
index fd551824adf9413062318f5d399183a52fbadad4..6a3105512d1880a857e7da95f6c1a8daa42bb8fe 100644 (file)
@@ -174,7 +174,7 @@ cleanup:
        return ret;
 }
 
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
@@ -210,7 +210,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
        intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);
-       engine->last_submitted_seqno = seqno;
+
+       GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+       engine->timeline->last_submitted_seqno = seqno;
 
        engine->hangcheck.seqno = seqno;
 
@@ -225,10 +227,9 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
        memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
 }
 
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
 {
-       init_request_active(&engine->last_request, NULL);
-       INIT_LIST_HEAD(&engine->request_list);
+       engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
 }
 
 /**
@@ -245,9 +246,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
        INIT_LIST_HEAD(&engine->execlist_queue);
        spin_lock_init(&engine->execlist_lock);
 
-       engine->fence_context = dma_fence_context_alloc(1);
-
-       intel_engine_init_requests(engine);
+       intel_engine_init_timeline(engine);
        intel_engine_init_hangcheck(engine);
        i915_gem_batch_pool_init(engine, &engine->batch_pool);
 
index cb6e96c6cd47d4ecd813742924613f1febc83230..a62e396c8863062d33de726daab2e44a2a1a0314 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -169,7 +170,6 @@ struct intel_engine_cs {
                VCS2,   /* Keep instances of the same type engine together. */
                VECS
        } id;
-#define I915_NUM_ENGINES 5
 #define _VCS(n) (VCS + (n))
        unsigned int exec_id;
        enum intel_engine_hw_id {
@@ -180,10 +180,10 @@ struct intel_engine_cs {
                VCS2_HW
        } hw_id;
        enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
-       u64 fence_context;
        u32             mmio_base;
        unsigned int irq_shift;
        struct intel_ring *buffer;
+       struct intel_timeline *timeline;
 
        struct intel_render_state *render_state;
 
@@ -346,27 +346,6 @@ struct intel_engine_cs {
        bool preempt_wa;
        u32 ctx_desc_template;
 
-       /**
-        * List of breadcrumbs associated with GPU requests currently
-        * outstanding.
-        */
-       struct list_head request_list;
-
-       /**
-        * Seqno of request most recently submitted to request_list.
-        * Used exclusively by hang checker to avoid grabbing lock while
-        * inspecting request list.
-        */
-       u32 last_submitted_seqno;
-       u32 last_pending_seqno;
-
-       /* An RCU guarded pointer to the last request. No reference is
-        * held to the request, users must carefully acquire a reference to
-        * the request using i915_gem_active_get_rcu(), or hold the
-        * struct_mutex.
-        */
-       struct i915_gem_active last_request;
-
        struct i915_gem_context *last_context;
 
        struct intel_engine_hangcheck hangcheck;
@@ -516,20 +495,13 @@ static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ring *ring);
 
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
-                                   unsigned int flags)
-{
-       /* Wait upon the last request to be completed */
-       return i915_gem_active_wait(&engine->last_request, flags);
-}
-
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@@ -619,7 +591,7 @@ unsigned int intel_kick_signalers(struct drm_i915_private *i915);
 
 static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
 {
-       return i915_gem_active_isset(&engine->last_request);
+       return i915_gem_active_isset(&engine->timeline->last_request);
 }
 
 #endif /* _INTEL_RINGBUFFER_H_ */