*/
request->postfix = ring->tail;
- if (i915.enable_execlists)
- ret = engine->emit_request(request);
- else
- ret = engine->add_request(request);
/* Not allowed to fail! */
- WARN(ret, "emit|add_request failed: %d!\n", ret);
+ ret = engine->emit_request(request);
+ WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
/* Sanity check that the reserved size was large enough. */
ret = ring->tail - request_start;
if (ret < 0)
ret += ring->size;
WARN_ONCE(ret > reserved_tail,
"Not enough space reserved (%d bytes) "
"for adding the request (%d bytes)\n",
reserved_tail, ret);
i915_gem_mark_busy(engine);
+ engine->submit_request(request);
}
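/*
 * Not part of this patch: a minimal sketch of the two-phase flow the
 * hunk above establishes. Every backend now implements the same pair
 * of hooks, so the common path no longer branches on the submission
 * mode:
 *
 *	ret = engine->emit_request(request);	// write breadcrumb into the ring
 *	engine->submit_request(request);	// hand the request to hardware
 *
 * emit_request() should not fail here because the space was reserved
 * up front (hence the WARN above), and submit_request() returns void.
 */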
static unsigned long local_clock_us(unsigned int *cpu)
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouldn't happen.
*/
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
unsigned int engine_id = rq->engine->id;
struct intel_guc *guc = &rq->i915->guc;
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->fence.seqno;
-
- return b_ret;
}
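/*
 * Not part of this patch: with the doorbell status no longer returned
 * to the caller, one way to keep some visibility into failures is to
 * account for them on the client (a sketch; the field name below is
 * illustrative, not from this series):
 *
 *	b_ret = guc_ring_doorbell(client);
 *	if (b_ret)
 *		client->doorbell_failures += 1;
 *
 * As the comment above says, the doorbell only fails if the hardware
 * is misbehaving, so a counter for later inspection is sufficient.
 */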
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client;
+ struct intel_engine_cs *engine;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
host2guc_sample_forcewake(guc, client);
guc_init_doorbell_hw(guc);
+ /* Take over from manual control of ELSP (execlists) */
+ for_each_engine(engine, dev_priv)
+ engine->submit_request = i915_guc_submit;
+
return 0;
}
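/*
 * Not part of this patch: a sketch of how the enable path might be
 * driven from GuC setup (the caller shape is illustrative):
 *
 *	if (i915.enable_guc_submission) {
 *		err = i915_guc_submission_enable(dev_priv);
 *		if (err)
 *			goto err_fallback;	// execlists submission still installed
 *	}
 *
 * Note that only engine->submit_request is redirected here; request
 * emission is untouched, so the GuC path reuses the execlists
 * emit_request vfuncs installed by logical_ring_default_vfuncs().
 */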
void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ if (!guc->execbuf_client)
+ return;
+
guc_client_free(dev_priv, guc->execbuf_client);
guc->execbuf_client = NULL;
+
+ /* Revert back to manual ELSP submission */
+ intel_execlists_enable_submission(dev_priv);
}
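/*
 * Not part of this patch: the new guard makes disable safe to call
 * unconditionally from teardown paths, e.g. (illustrative):
 *
 *	i915_guc_submission_disable(dev_priv);	// no-op if never enabled
 *	i915_guc_submission_fini(dev_priv);
 *
 * and the revert to execlists submission keeps every engine with a
 * valid submit_request even after the GuC client is gone.
 */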
void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
}
/*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
* @request: Request to advance the logical ringbuffer of.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
{
struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
*/
request->previous_context = engine->last_context;
engine->last_context = request->ctx;
-
- if (i915.enable_guc_submission)
- i915_guc_submit(request);
- else
- execlists_context_queue(request);
-
return 0;
}
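/*
 * Not part of this patch's hunk: the guc-vs-execlists branch deleted
 * above is not lost, it moves behind the engine vfunc. With
 *
 *	engine->submit_request = execlists_context_queue;	// default (below)
 *	engine->submit_request = i915_guc_submit;		// after GuC enable
 *
 * the common request path simply calls engine->submit_request(request)
 * and never needs to test i915.enable_guc_submission again.
 */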
intel_ring_emit(ring, request->fence.seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance_and_submit(request);
+ return intel_logical_ring_advance(request);
}
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance_and_submit(request);
+ return intel_logical_ring_advance(request);
}
static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
engine->i915 = NULL;
}
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, dev_priv)
+ engine->submit_request = execlists_context_queue;
+}
+
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overridden by each engine. */
engine->init_hw = gen8_init_common_ring;
- engine->emit_request = gen8_emit_request;
engine->emit_flush = gen8_emit_flush;
+ engine->emit_request = gen8_emit_request;
+ engine->submit_request = execlists_context_queue;
+
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
}
/**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_emit_request - Update the semaphore mailbox registers
*
* @req: request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_emit_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
struct intel_ring *ring = req->ring;
intel_ring_advance(ring);
req->tail = ring->tail;
- engine->submit_request(req);
return 0;
}
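/*
 * Not part of this patch: roughly what the mailbox update described in
 * the comment above looks like (a sketch; the register bookkeeping is
 * elided). For each of the other engines, the signaller emits:
 *
 *	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 *	intel_ring_emit_reg(ring, mbox_reg);	// that engine's mailbox
 *	intel_ring_emit(ring, req->fence.seqno);
 *
 * A waiting engine then compares the seqno it needs against the value
 * in its mailbox before proceeding.
 */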
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
struct intel_ring *ring = req->ring;
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
req->tail = ring->tail;
- engine->submit_request(req);
return 0;
}
return 0;
}
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
int ret;
intel_ring_advance(ring);
req->tail = ring->tail;
- req->engine->submit_request(req);
return 0;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
engine->init_hw = init_ring_common;
- engine->submit_request = i9xx_submit_request;
- engine->add_request = i9xx_add_request;
+ engine->emit_request = i9xx_emit_request;
if (INTEL_GEN(dev_priv) >= 6)
- engine->add_request = gen6_add_request;
+ engine->emit_request = gen6_emit_request;
+ engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
engine->emit_bb_start = gen8_emit_bb_start;
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen8_render_add_request;
+ engine->emit_request = gen8_render_emit_request;
engine->emit_flush = gen8_render_ring_flush;
if (i915.semaphores)
engine->semaphore.signal = gen8_rcs_signal;
int (*init_context)(struct drm_i915_gem_request *req);
- int (*add_request)(struct drm_i915_gem_request *req);
+ int (*emit_flush)(struct drm_i915_gem_request *request,
+ u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
+ int (*emit_bb_start)(struct drm_i915_gem_request *req,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS BIT(2)
+ int (*emit_request)(struct drm_i915_gem_request *req);
+ void (*submit_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
- int (*emit_request)(struct drm_i915_gem_request *request);
- int (*emit_flush)(struct drm_i915_gem_request *request,
- u32 mode);
-#define EMIT_INVALIDATE BIT(0)
-#define EMIT_FLUSH BIT(1)
-#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct drm_i915_gem_request *req,
- u64 offset, u32 length,
- unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS 0x4
- void (*submit_request)(struct drm_i915_gem_request *req);
/**
* List of objects currently involved in rendering from the