i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_timeline.o \
i915_gem_userptr.o \
i915_trace_points.o \
intel_breadcrumbs.o \
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
- dev_priv->next_seqno,
+ dev_priv->gt.global_timeline.next_seqno,
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
print_request(m, req, " ");
any++;
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
- *val = dev_priv->next_seqno;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ *val = READ_ONCE(dev_priv->gt.global_timeline.next_seqno);
return 0;
}
if (ret)
return ret;
- ret = i915_gem_set_seqno(dev, val);
+ ret = i915_gem_set_global_seqno(dev, val);
mutex_unlock(&dev->struct_mutex);
return ret;
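
The get side above now reads next_seqno locklessly (READ_ONCE() on a plain u32 suffices for a debugfs peek), while the set side keeps struct_mutex because i915_gem_set_global_seqno() must idle the GPU and rewrite per-engine breadcrumb state. For context, the two callbacks are paired into a single debugfs attribute; a sketch of that wiring, following the existing i915_next_seqno_fops naming in i915_debugfs.c (reference only, not part of this diff):

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
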
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno,
seqno[id],
- engine->last_submitted_seqno);
+ engine->timeline->last_submitted_seqno);
seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
seq_printf(m, "%s\n", engine->name);
seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
intel_engine_get_seqno(engine),
- engine->last_submitted_seqno,
+ engine->timeline->last_submitted_seqno,
engine->hangcheck.seqno,
engine->hangcheck.score);
seq_printf(m, "\tRequests:\n");
- rq = list_first_entry(&engine->request_list,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->request_list)
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tfirst ");
- rq = list_last_entry(&engine->request_list,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->request_list)
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tlast ");
rq = i915_gem_find_active_request(engine);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(&dev_priv->drm);
+ if (ret < 0)
+ goto err_gvt;
intel_display_crc_init(dev_priv);
return 0;
+err_gvt:
+ intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#include "intel_gvt.h"
struct i915_gem_context *kernel_context;
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
- u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
+ struct list_head timelines;
+ struct i915_gem_timeline global_timeline;
+
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
i915_gem_request_retire_upto(rq);
- if (rps && rq->fence.seqno == rq->engine->last_submitted_seqno) {
+ if (rps && rq->fence.seqno == rq->timeline->last_submitted_seqno) {
/* The GPU is now idle and this client has stalled.
* Since no other client has submitted a request in the
* meantime, assume that this client is the only one
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, link) {
+ list_for_each_entry(request, &engine->timeline->requests, link) {
if (i915_gem_request_completed(request))
continue;
if (i915_gem_context_is_default(incomplete_ctx))
return;
- list_for_each_entry_continue(request, &engine->request_list, link)
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
}
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+ intel_engine_init_global_seqno(engine,
+ engine->timeline->last_submitted_seqno);
/*
* Clear the execlists queue up before freeing the requests, as those
return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int ret;
+ int ret, i;
- for_each_engine(engine, dev_priv, id) {
- if (engine->last_context == NULL)
- continue;
+ for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+ ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
+ if (ret)
+ return ret;
+ }
- ret = intel_engine_idle(engine, flags);
+ return 0;
+}
+
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+{
+ struct i915_gem_timeline *tl;
+ int ret;
+
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
return ret;
}
i915_gem_detect_bit_6_swizzle(dev);
}
-void
+int
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ int err;
dev_priv->objects =
kmem_cache_create("i915_gem_object",
sizeof(struct drm_i915_gem_object), 0,
SLAB_HWCACHE_ALIGN,
NULL);
+ if (!dev_priv->objects) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
dev_priv->vmas =
kmem_cache_create("i915_gem_vma",
sizeof(struct i915_vma), 0,
SLAB_HWCACHE_ALIGN,
NULL);
+ if (!dev_priv->vmas) {
+ err = -ENOMEM;
+ goto err_objects;
+ }
+
dev_priv->requests =
kmem_cache_create("i915_gem_request",
sizeof(struct drm_i915_gem_request), 0,
SLAB_RECLAIM_ACCOUNT |
SLAB_DESTROY_BY_RCU,
NULL);
+ if (!dev_priv->requests) {
+ err = -ENOMEM;
+ goto err_vmas;
+ }
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ INIT_LIST_HEAD(&dev_priv->gt.timelines);
+ err = i915_gem_timeline_init(dev_priv,
+ &dev_priv->gt.global_timeline,
+ "[execution]");
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ goto err_requests;
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
+
+ return 0;
+
+err_requests:
+ kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+ kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+ kmem_cache_destroy(dev_priv->objects);
+err_out:
+ return err;
}
void i915_gem_load_cleanup(struct drm_device *dev)
#define GEM_BUG_ON(expr)
#endif
+#define I915_NUM_ENGINES 5
+
#endif /* __I915_GEM_H__ */
* multiple execution contexts (fence contexts) as we allow
* engines within a single timeline to execute in parallel.
*/
- return "global";
+ return to_request(fence)->timeline->common->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
return;
do {
- tmp = list_first_entry(&engine->request_list,
+ tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
i915_gem_request_retire(tmp);
return 0;
}
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *dev_priv,
+ u32 seqno)
{
+ struct i915_gem_timeline *timeline = &dev_priv->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv, id) {
- ret = intel_engine_idle(engine,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- return ret;
- }
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
i915_gem_retire_requests(dev_priv);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+ if (!i915_seqno_passed(seqno, timeline->next_seqno)) {
while (intel_kick_waiters(dev_priv) ||
intel_kick_signalers(dev_priv))
yield();
+ yield();
}
/* Finally reset hw state */
for_each_engine(engine, dev_priv, id)
- intel_engine_init_seqno(engine, seqno);
+ intel_engine_init_global_seqno(engine, seqno);
return 0;
}
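
The wrap check above relies on i915_seqno_passed(), which compares two u32 seqnos by signed subtraction, so ordering is defined modulo 2^32; whenever the requested seqno does not "pass" the current next_seqno, any breadcrumbs cached in the rbtree are stale and must be flushed first. A self-contained illustration of the comparison rule (userspace C, with stdint types standing in for the kernel's u32/s32):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same rule as i915_seqno_passed(): seq1 has passed seq2 iff the
 * signed difference is non-negative, which stays correct across
 * the u32 wrap point.
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(2, 1));           /* ordinary ordering */
	assert(!seqno_passed(1, 2));
	assert(seqno_passed(1, 0xfffffffe));  /* 1 lies "after" the wrap */
	assert(!seqno_passed(0xfffffffe, 1));
	return 0;
}
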
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+ ret = i915_gem_init_global_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
- dev_priv->next_seqno = seqno;
+ dev_priv->gt.global_timeline.next_seqno = seqno;
return 0;
}
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static int i915_gem_get_global_seqno(struct drm_i915_private *dev_priv,
+ u32 *seqno)
{
+ struct i915_gem_timeline *tl = &dev_priv->gt.global_timeline;
+
/* reserve 0 for non-seqno */
- if (unlikely(dev_priv->next_seqno == 0)) {
+ if (unlikely(tl->next_seqno == 0)) {
int ret;
- ret = i915_gem_init_seqno(dev_priv, 0);
+ ret = i915_gem_init_global_seqno(dev_priv, 0);
if (ret)
return ret;
- dev_priv->next_seqno = 1;
+ tl->next_seqno = 1;
}
- *seqno = dev_priv->next_seqno++;
+ *seqno = tl->next_seqno++;
return 0;
}
{
struct drm_i915_gem_request *request =
container_of(fence, typeof(*request), submit);
+ struct intel_engine_cs *engine = request->engine;
/* Will be called from irq-context when using foreign DMA fences */
switch (state) {
case FENCE_COMPLETE:
- request->engine->last_submitted_seqno = request->fence.seqno;
- request->engine->submit_request(request);
+ engine->timeline->last_submitted_seqno = request->fence.seqno;
+ engine->submit_request(request);
break;
case FENCE_FREE:
return ERR_PTR(ret);
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->request_list,
+ req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
if (req && i915_gem_request_completed(req))
i915_gem_request_retire(req);
if (!req)
return ERR_PTR(-ENOMEM);
- ret = i915_gem_get_seqno(dev_priv, &seqno);
+ ret = i915_gem_get_global_seqno(dev_priv, &seqno);
if (ret)
goto err;
+ req->timeline = engine->timeline;
+
spin_lock_init(&req->lock);
dma_fence_init(&req->fence,
&i915_fence_ops,
&req->lock,
- engine->fence_context,
+ req->timeline->fence_context,
seqno);
i915_sw_fence_init(&req->submit, submit_notify);
GEM_BUG_ON(to == from);
- if (to->engine == from->engine)
+ if (to->timeline == from->timeline)
return 0;
+ if (to->engine == from->engine) {
+ ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
idx = intel_engine_sync_index(from->engine, to->engine);
if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
return 0;
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
+ struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
u32 request_start;
u32 reserved_tail;
* see a more recent value in the hws than we are tracking.
*/
- prev = i915_gem_active_raw(&engine->last_request,
+ prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
if (prev)
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
request->emitted_jiffies = jiffies;
- request->previous_seqno = engine->last_pending_seqno;
- engine->last_pending_seqno = request->fence.seqno;
- i915_gem_active_set(&engine->last_request, request);
- list_add_tail(&request->link, &engine->request_list);
+ request->previous_seqno = timeline->last_pending_seqno;
+ timeline->last_pending_seqno = request->fence.seqno;
+ i915_gem_active_set(&timeline->last_request, request);
+ list_add_tail(&request->link, &timeline->requests);
list_add_tail(&request->ring_link, &ring->request_list);
i915_gem_mark_busy(engine);
{
struct drm_i915_gem_request *request, *next;
- list_for_each_entry_safe(request, next, &engine->request_list, link) {
+ list_for_each_entry_safe(request, next,
+ &engine->timeline->requests, link) {
if (!i915_gem_request_completed(request))
return false;
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
struct intel_signal_node signaling;
struct i915_sw_fence submit;
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
+{
+ unsigned int i;
+ u64 fences;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ timeline->i915 = i915;
+ timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+ if (!timeline->name)
+ return -ENOMEM;
+
+ list_add(&timeline->link, &i915->gt.timelines);
+
+ /* Called during early_init before we know how many engines there are */
+ fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ tl->fence_context = fences++;
+ tl->common = timeline;
+
+ init_request_active(&tl->last_request, NULL);
+ INIT_LIST_HEAD(&tl->requests);
+ }
+
+ return 0;
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+ lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+ list_del(&tl->link);
+ kfree(tl->name);
+}
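
Both entry points assert struct_mutex, leaving the locking to the caller; the init side is exercised by the i915_gem_load_init() hunk earlier in this patch. A sketch of the matching teardown (an assumption here, since the load_cleanup hunk is not part of this excerpt):

/* Assumed teardown pairing, mirroring the locked init in
 * i915_gem_load_init(); fini must run under struct_mutex because it
 * unlinks the timeline from i915->gt.timelines.
 */
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
mutex_unlock(&dev_priv->drm.struct_mutex);
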
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 last_submitted_seqno;
+ u32 last_pending_seqno;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_gem_active_get_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_gem_active last_request;
+
+ struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+ struct list_head link;
+ u32 next_seqno;
+
+ struct drm_i915_private *i915;
+ const char *name;
+
+ struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *tl,
+ const char *name);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif /* I915_GEM_TIMELINE_H */
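
To make the shape concrete: an i915_gem_timeline carries one intel_timeline slice per engine, and i915_gem_timeline_init() hands each slice its own fence context from a consecutive block, so requests on one slice are totally ordered while slices on different engines can advance in parallel. A self-contained model of that allocation (userspace C; fence_context_alloc() below is a stand-in for the kernel's dma_fence_context_alloc(), not the real API):

#include <stdint.h>
#include <stdio.h>

enum { NUM_ENGINES = 5 };	/* mirrors I915_NUM_ENGINES */

/* Stand-in for dma_fence_context_alloc(): a monotonic counter that
 * reserves 'num' consecutive context ids per call.
 */
static uint64_t fence_context_alloc(unsigned int num)
{
	static uint64_t counter = 1;
	uint64_t first = counter;

	counter += num;
	return first;
}

int main(void)
{
	uint64_t fences = fence_context_alloc(NUM_ENGINES);
	unsigned int i;

	/* Each per-engine slice takes the next id from the block,
	 * as the loop in i915_gem_timeline_init() does.
	 */
	for (i = 0; i < NUM_ENGINES; i++)
		printf("[execution] engine %u -> fence context %llu\n",
		       i, (unsigned long long)fences++);
	return 0;
}
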
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = engine->last_submitted_seqno;
+ ee->last_seqno = engine->timeline->last_submitted_seqno;
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link)
+ list_for_each_entry_from(request, &engine->timeline->requests, link)
count++;
if (!count)
return;
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link) {
+ list_for_each_entry_from(request, &engine->timeline->requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
engine->submit_request = i915_guc_submit;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request, &engine->request_list, link) {
+ list_for_each_entry(request,
+ &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
if (i915_sw_fence_done(&request->submit))
i915_guc_submit(request);
acthd = intel_engine_get_active_head(engine);
seqno = intel_engine_get_seqno(engine);
- submit = READ_ONCE(engine->last_submitted_seqno);
+ submit = READ_ONCE(engine->timeline->last_submitted_seqno);
if (engine->hangcheck.seqno == seqno) {
if (i915_seqno_passed(seqno, submit)) {
return ret;
}
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- engine->last_submitted_seqno = seqno;
+
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+ engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
- init_request_active(&engine->last_request, NULL);
- INIT_LIST_HEAD(&engine->request_list);
+ engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
INIT_LIST_HEAD(&engine->execlist_queue);
spin_lock_init(&engine->execlist_lock);
- engine->fence_context = dma_fence_context_alloc(1);
-
- intel_engine_init_requests(engine);
+ intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
i915_gem_batch_pool_init(engine, &engine->batch_pool);
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#define I915_CMD_HASH_ORDER 9
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
enum intel_engine_hw_id {
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
- u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
+ struct intel_timeline *timeline;
struct intel_render_state *render_state;
bool preempt_wa;
u32 ctx_desc_template;
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * Seqno of request most recently submitted to request_list.
- * Used exclusively by hang checker to avoid grabbing lock while
- * inspecting request list.
- */
- u32 last_submitted_seqno;
- u32 last_pending_seqno;
-
- /* An RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_gem_active_get_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_gem_active last_request;
-
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
- unsigned int flags)
-{
- /* Wait upon the last request to be completed */
- return i915_gem_active_wait(&engine->last_request, flags);
-}
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
{
- return i915_gem_active_isset(&engine->last_request);
+ return i915_gem_active_isset(&engine->timeline->last_request);
}
#endif /* _INTEL_RINGBUFFER_H_ */