intel_runtime_pm_get(dev_priv);
for_each_engine_id(engine, dev_priv, id) {
- acthd[id] = intel_ring_get_active_head(engine);
+ acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
return 0;
}
-static void describe_ctx_ringbuf(struct seq_file *m,
- struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
- ringbuf->space, ringbuf->head, ringbuf->tail,
- ringbuf->last_retired_head);
+ ring->space, ring->head, ring->tail,
+ ring->last_retired_head);
}
static int i915_context_status(struct seq_file *m, void *unused)
if (ce->state)
describe_obj(m, ce->state);
if (ce->ring)
- describe_ctx_ringbuf(m, ce->ring);
+ describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
}
bool waiting;
int num_waiters;
int hangcheck_score;
- enum intel_ring_hangcheck_action hangcheck_action;
+ enum intel_engine_hangcheck_action hangcheck_action;
int num_requests;
/* our own tracking of ring head and tail */
struct intel_context {
struct drm_i915_gem_object *state;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
struct i915_vma *lrc_vma;
uint32_t *lrc_reg_state;
u64 lrc_desc;
static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
- struct intel_ringbuffer *buffer;
+ struct intel_ring *ring;
while (!list_empty(&engine->active_list)) {
struct drm_i915_gem_object *obj;
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+ intel_engine_init_seqno(engine, engine->last_submitted_seqno);
/*
* Clear the execlists queue up before freeing the requests, as those
* upon reset is less than when we start. Do one more pass over
- * all the ringbuffers to reset last_retired_head.
+ * all the rings to reset last_retired_head.
*/
- list_for_each_entry(buffer, &engine->buffers, link) {
- buffer->last_retired_head = buffer->tail;
- intel_ring_update_space(buffer);
+ list_for_each_entry(ring, &engine->buffers, link) {
+ ring->last_retired_head = ring->tail;
+ intel_ring_update_space(ring);
}
engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
i915_gem_object_retire_request(obj, from_req);
} else {
- int idx = intel_ring_sync_index(from, to);
+ int idx = intel_engine_sync_index(from, to);
u32 seqno = i915_gem_request_get_seqno(from_req);
WARN_ON(!to_req);
if (!i915.enable_execlists) {
dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
- dev_priv->gt.cleanup_engine = intel_cleanup_engine;
- dev_priv->gt.stop_engine = intel_stop_engine;
+ dev_priv->gt.cleanup_engine = intel_engine_cleanup;
+ dev_priv->gt.stop_engine = intel_engine_stop;
} else {
dev_priv->gt.execbuf_submit = intel_execlists_submission;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
WARN_ON(ce->pin_count);
if (ce->ring)
- intel_ringbuffer_free(ce->ring);
+ intel_ring_free(ce->ring);
i915_gem_object_put(ce->state);
}
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int i, ret;
if (!remap_info)
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(req);
+ return intel_engine_invalidate_all_caches(req);
}
static bool
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret, i;
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
if (params->engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ringbuffer *ring = params->request->ring;
+ struct intel_ring *ring = params->request->ring;
ret = intel_ring_begin(params->request, 4);
if (ret)
unsigned entry,
dma_addr_t addr)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
- intel_ring_init_seqno(engine, seqno);
+ intel_engine_init_seqno(engine, seqno);
return 0;
}
bool flush_caches)
{
struct intel_engine_cs *engine;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
u32 request_start;
u32 reserved_tail;
int ret;
if (i915.enable_execlists)
ret = logical_ring_flush_all_caches(request);
else
- ret = intel_ring_flush_all_caches(request);
+ ret = intel_engine_flush_all_caches(request);
/* Not allowed to fail! */
- WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+ WARN(ret, "*_flush_all_caches failed: %d!\n", ret);
}
*/
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
struct intel_signal_node signaling;
/** GEM sequence number associated with the previous request,
}
}
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
{
switch (a) {
case HANGCHECK_IDLE:
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore_obj->pages[0];
- idx = intel_ring_sync_index(engine, to);
+ idx = intel_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
- ee->acthd = intel_ring_get_active_head(engine);
+ ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
ee->last_seqno = engine->last_submitted_seqno;
ee->start = I915_READ_START(engine);
request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base : &ggtt->base;
return stuck;
}
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
if (acthd != engine->hangcheck.acthd) {
return HANGCHECK_HUNG;
}
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
struct drm_i915_private *dev_priv = engine->i915;
- enum intel_ring_hangcheck_action ha;
+ enum intel_engine_hangcheck_action ha;
u32 tmp;
ha = head_stuck(engine, acthd);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- acthd = intel_ring_get_active_head(engine);
+ acthd = intel_engine_get_active_head(engine);
seqno = intel_engine_get_seqno(engine);
/* Reset stuck interrupts between batch advances */
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
- engine->hangcheck.action = ring_stuck(engine,
- acthd);
+ engine->hangcheck.action =
+ engine_stuck(engine, acthd);
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
if (i915.enable_execlists)
intel_logical_ring_cleanup(&dev_priv->engine[i]);
else
- intel_cleanup_engine(&dev_priv->engine[i]);
+ intel_engine_cleanup(&dev_priv->engine[i]);
}
return ret;
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ring = request->ring;
+ struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
intel_ring_advance(ring);
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ring = params->request->ring;
+ struct intel_ring *ring = params->request->ring;
u64 exec_start;
int instp_mode;
u32 instp_mask;
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
+ ret = intel_pin_and_map_ring(dev_priv, ce->ring);
if (ret)
goto unpin_map;
if (--ce->pin_count)
return;
- intel_unpin_ringbuffer_obj(ce->ring);
+ intel_unpin_ring(ce->ring);
i915_gem_object_unpin_map(ce->state);
i915_gem_object_ggtt_unpin(ce->state);
{
int ret, i;
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
u32 invalidate_domains,
u32 unused)
{
- struct intel_ringbuffer *ring = request->ring;
- uint32_t cmd;
+ struct intel_ring *ring = request->ring;
+ u32 cmd;
int ret;
ret = intel_ring_begin(request, 4);
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = request->ring;
+ struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ring = request->ring;
+ struct intel_ring *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ring = request->ring;
+ struct intel_ring *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
populate_lr_context(struct i915_gem_context *ctx,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
- struct intel_ringbuffer *ringbuf)
+ struct intel_ring *ring)
{
struct drm_i915_private *dev_priv = ctx->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
- ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
struct drm_i915_gem_object *ctx_obj;
struct intel_context *ce = &ctx->engine[engine->id];
uint32_t context_size;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
int ret;
WARN_ON(ce->state);
return PTR_ERR(ctx_obj);
}
- ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
+ ring = intel_engine_create_ring(engine, ctx->ring_size);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
return 0;
error_ring_free:
- intel_ringbuffer_free(ring);
+ intel_ring_free(ring);
error_deref_obj:
i915_gem_object_put(ctx_obj);
ce->ring = NULL;
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
unsigned int i;
int ret;
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
int ret;
WARN_ON(overlay->active);
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
int ret;
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return space - I915_RING_FREE_SPACE;
}
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ringbuf)
{
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
static void __intel_engine_submit(struct intel_engine_cs *engine)
{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- ringbuf->tail &= ringbuf->size - 1;
- engine->write_tail(engine, ringbuf->tail);
+ struct intel_ring *ring = engine->buffer;
+
+ ring->tail &= ring->size - 1;
+ engine->write_tail(engine, ring->tail);
}
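
The fold above (repeated by intel_ring_advance() in the header further down) only behaves as a wrap because the driver allocates power-of-two ring sizes (32 * PAGE_SIZE in intel_init_ring_buffer). A minimal standalone sketch of that invariant; the sizes and values here are illustrative only:

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE (32u * 4096u)  /* power of two, as the driver allocates */

    int main(void)
    {
        uint32_t tail = RING_SIZE;   /* emitted right up to the end */

        tail &= RING_SIZE - 1;       /* the fold from __intel_engine_submit */
        assert(tail == 0);           /* tail == size wraps to the start */

        tail = 0x40;                 /* any in-range tail is left untouched */
        assert((tail & (RING_SIZE - 1)) == tail);
        return 0;
    }
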
static int
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
u32 cmd;
int ret;
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
u32 cmd;
int ret;
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
u32 scratch_addr =
req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
gen6_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
u32 scratch_addr =
req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
u32 flags = 0;
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
u32 scratch_addr =
req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
u32 flags = 0;
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 6);
I915_WRITE_TAIL(engine, value);
}
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
static int init_ring_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ringbuffer *ringbuf = engine->buffer;
- struct drm_i915_gem_object *obj = ringbuf->obj;
+ struct intel_ring *ring = engine->buffer;
+ struct drm_i915_gem_object *obj = ring->obj;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
(void)I915_READ_HEAD(engine);
I915_WRITE_CTL(engine,
- ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
goto out;
}
- ringbuf->last_retired_head = -1;
- ringbuf->head = I915_READ_HEAD(engine);
- ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
- intel_ring_update_space(ringbuf);
+ ring->last_retired_head = -1;
+ ring->head = I915_READ_HEAD(engine);
+ ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
+ intel_ring_update_space(ring);
intel_engine_init_hangcheck(engine);
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
int ret, i;
return 0;
req->engine->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(req);
+ ret = intel_engine_flush_all_caches(req);
if (ret)
return ret;
intel_ring_advance(ring);
req->engine->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(req);
+ ret = intel_engine_flush_all_caches(req);
if (ret)
return ret;
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
- struct intel_ringbuffer *signaller = signaller_req->ring;
+ struct intel_ring *signaller = signaller_req->ring;
struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
- struct intel_ringbuffer *signaller = signaller_req->ring;
+ struct intel_ring *signaller = signaller_req->ring;
struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
- struct intel_ringbuffer *signaller = signaller_req->ring;
+ struct intel_ring *signaller = signaller_req->ring;
struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *useless;
enum intel_engine_id id;
gen6_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
if (engine->semaphore.signal)
gen8_render_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
if (engine->semaphore.signal)
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_ringbuffer *waiter = waiter_req->ring;
+ struct intel_ring *waiter = waiter_req->ring;
struct drm_i915_private *dev_priv = waiter_req->i915;
u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
struct i915_hw_ppgtt *ppgtt;
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_ringbuffer *waiter = waiter_req->ring;
+ struct intel_ring *waiter = waiter_req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
u64 offset, u32 length,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
u32 cs_offset = req->engine->scratch.gtt_offset;
int ret;
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
return 0;
}
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ring(struct intel_ring *ringbuf)
{
GEM_BUG_ON(!ringbuf->vma);
GEM_BUG_ON(!ringbuf->vaddr);
ringbuf->vma = NULL;
}
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
- struct intel_ringbuffer *ringbuf)
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+ struct intel_ring *ringbuf)
{
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
return ret;
}
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
{
i915_gem_object_put(ringbuf->obj);
ringbuf->obj = NULL;
}
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
- struct intel_ringbuffer *ringbuf)
+ struct intel_ring *ringbuf)
{
struct drm_i915_gem_object *obj;
return 0;
}
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
- struct intel_ringbuffer *ring;
+ struct intel_ring *ring;
int ret;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
}
void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
{
intel_destroy_ringbuffer_obj(ring);
list_del(&ring->link);
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ringbuffer *ringbuf;
+ struct intel_ring *ringbuf;
int ret;
WARN_ON(engine->buffer);
if (ret)
goto error;
- ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error;
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+ ret = intel_pin_and_map_ring(dev_priv, ringbuf);
if (ret) {
- DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ DRM_ERROR("Failed to pin and map ring %s: %d\n",
engine->name, ret);
return 0;
error:
- intel_cleanup_engine(engine);
+ intel_engine_cleanup(engine);
return ret;
}
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
dev_priv = engine->i915;
if (engine->buffer) {
- intel_stop_engine(engine);
+ intel_engine_stop(engine);
WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
- intel_unpin_ringbuffer_obj(engine->buffer);
- intel_ringbuffer_free(engine->buffer);
+ intel_unpin_ring(engine->buffer);
+ intel_ring_free(engine->buffer);
engine->buffer = NULL;
}
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *target;
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int remain_actual = ring->size - ring->tail;
int remain_usable = ring->effective_size - ring->tail;
int bytes = num_dwords * sizeof(u32);
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int num_dwords =
(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
return 0;
}
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
uint32_t cmd;
int ret;
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_ringbuffer *ring = req->ring;
+ struct intel_ring *ring = req->ring;
uint32_t cmd;
int ret;
}
int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
+intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
int ret;
}
int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
uint32_t flush_domains;
return 0;
}
-void
-intel_stop_engine(struct intel_engine_cs *engine)
+void intel_engine_stop(struct intel_engine_cs *engine)
{
int ret;
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
#define HANGCHECK_SCORE_RING_HUNG 31
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
u64 acthd;
unsigned long user_interrupts;
u32 seqno;
int score;
- enum intel_ring_hangcheck_action action;
+ enum intel_engine_hangcheck_action action;
int deadlock;
u32 instdone[I915_NUM_INSTDONE_REG];
};
-struct intel_ringbuffer {
+struct intel_ring {
struct drm_i915_gem_object *obj;
void *vaddr;
struct i915_vma *vma;
u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
- struct intel_ringbuffer *buffer;
+ struct intel_ring *buffer;
struct list_head buffers;
/* Rather than have every client wait upon all user interrupts,
struct i915_gem_context *last_context;
- struct intel_ring_hangcheck hangcheck;
+ struct intel_engine_hangcheck hangcheck;
struct {
struct drm_i915_gem_object *obj;
}
static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
+ struct intel_engine_cs *other)
{
int idx;
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
- struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+ struct intel_ring *ring);
+void intel_unpin_ring(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
*(uint32_t *)(ring->vaddr + ring->tail) = data;
ring->tail += 4;
}
-static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
- i915_reg_t reg)
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
-static inline void intel_ring_advance(struct intel_ringbuffer *ring)
+static inline void intel_ring_advance(struct intel_ring *ring)
{
ring->tail &= ring->size - 1;
}
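
Taken together, intel_ring_begin(), intel_ring_emit() and intel_ring_advance() are the idiom every converted call site above follows: reserve dwords against the request, write them at ring->tail, then mask the tail back into range. A sketch of that sequence with a hypothetical helper (emit_two_noops is illustrative only, not part of this patch):

    /* Hypothetical helper: reserve, emit, advance - the idiom used above. */
    static int emit_two_noops(struct drm_i915_gem_request *req)
    {
        struct intel_ring *ring = req->ring;
        int ret;

        ret = intel_ring_begin(req, 2); /* reserve two dwords, may wait/wrap */
        if (ret)
            return ret;

        intel_ring_emit(ring, MI_NOOP); /* writes at ring->vaddr + ring->tail */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);       /* masks tail into [0, ring->size) */
        return 0;
    }
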
int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ringbuf);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
int init_workarounds_ring(struct intel_engine_cs *engine);
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
{
return ringbuf->tail;
}
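
For reviewers, the renamed create/pin/unpin/free entry points above are always driven as a unit; a condensed, hypothetical sketch of the lifecycle as intel_init_ring_buffer() and intel_engine_cleanup() use it (error paths abbreviated, example_ring_lifecycle is illustrative only):

    /* Sketch of the ring lifecycle under the new names. */
    static int example_ring_lifecycle(struct intel_engine_cs *engine,
                                      struct drm_i915_private *dev_priv)
    {
        struct intel_ring *ring;
        int ret;

        ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
        if (IS_ERR(ring))
            return PTR_ERR(ring);

        ret = intel_pin_and_map_ring(dev_priv, ring); /* maps ring->vaddr */
        if (ret) {
            intel_ring_free(ring);
            return ret;
        }
        engine->buffer = ring;

        /* ... requests are built and submitted here ... */

        intel_unpin_ring(engine->buffer);
        intel_ring_free(engine->buffer);
        engine->buffer = NULL;
        return 0;
    }
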