int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->dev))
+ if (!IS_GEN7(engine->i915))
return 0;
switch (engine->id) {
case RCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
} else {
	cmd_tables = gen7_render_cmds;
	cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
	cmd_tables = gen7_blt_cmds;
	cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
if (!engine->needs_cmd_parser)
return false;
- if (!USES_PPGTT(engine->dev))
+ if (!USES_PPGTT(engine->i915))
return false;
return (i915.enable_cmd_parser == 1);
seqno[id] = engine->get_seqno(engine);
}
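For reference, the series-wide pattern is to cache a direct backpointer (engine->i915) instead of chasing engine->dev->dev_private on every access. A minimal, compilable userspace sketch of that refactor; the struct layouts are illustrative stand-ins, not the kernel's:

#include <stdio.h>

struct drm_i915_private { int gen; };
struct drm_device { struct drm_i915_private *dev_private; };

struct intel_engine_cs {
	struct drm_device *dev;        /* old: two dereferences per use */
	struct drm_i915_private *i915; /* new: cached once at engine init */
};

static int engine_gen(const struct intel_engine_cs *engine)
{
	return engine->i915->gen;      /* was engine->dev->dev_private->gen */
}

int main(void)
{
	struct drm_i915_private i915 = { .gen = 7 };
	struct drm_device dev = { .dev_private = &i915 };
	struct intel_engine_cs engine = { .dev = &dev, .i915 = &i915 };

	printf("gen%d\n", engine_gen(&engine));
	return 0;
}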
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
intel_runtime_pm_put(dev_priv);
enum intel_engine_id id;
int j, ret;
- if (!i915_semaphore_is_enabled(dev)) {
+ if (!i915_semaphore_is_enabled(dev_priv)) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
intel_runtime_pm_get(dev_priv);
- i915_handle_error(dev, val,
+ i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv);
}
if (val & (DROP_RETIRE | DROP_ACTIVE))
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
value = 1;
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915_semaphore_is_enabled(dev);
+ value = i915_semaphore_is_enabled(dev_priv);
break;
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
info->has_eu_pg ? "y" : "n");
i915.enable_execlists =
- intel_sanitize_enable_execlists(dev, i915.enable_execlists);
+ intel_sanitize_enable_execlists(dev_priv,
+ i915.enable_execlists);
/*
 * i915.enable_ppgtt is read-only, so do an early pass to validate the
 * user's requested state against the hardware/driver capabilities.  We
 * do this now so that we can print out any log messages once rather
 * than every time we check intel_enable_ppgtt().
*/
i915.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+ intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
}
* Notify a valid surface after modesetting,
* when running inside a VM.
*/
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
i915_setup_sysfs(dev);
pci_dev_put(pch);
}
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return false;
	if (i915.semaphores >= 0)
		return i915.semaphores;
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false;
#endif
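A condensed, self-contained restatement of the gating logic above; the plain variables stand in for i915.semaphores and intel_iommu_gfx_mapped, and the final default is simplified relative to the real helper:

#include <stdbool.h>

static int modparam_semaphores = -1; /* -1 = auto, mirrors i915.semaphores */
static bool iommu_gfx_mapped;        /* mirrors intel_iommu_gfx_mapped */

static bool semaphore_is_enabled(int gen)
{
	if (gen < 6)
		return false;
	if (modparam_semaphores >= 0)
		return modparam_semaphores != 0; /* explicit user override */
	/* auto mode: keep semaphores off on SNB when IO remapping is active */
	if (gen == 6 && iommu_gfx_mapped)
		return false;
	return true;
}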
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
-int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt);
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt);
/* i915_dma.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);
#endif
extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
/* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
__printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
- return to_i915(dev)->vgpu.active;
+ return dev_priv->vgpu.active;
}
void
req->seqno);
}
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-bool i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush();
}
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_enable_rc6(const struct drm_device *dev);
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
}
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
}
if (needs_clflush_after)
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
else
obj->cache_dirty = true;
struct intel_rps_client *rps)
{
struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = req->i915;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
- lockdep_assert_held(&engine->dev->struct_mutex);
+ lockdep_assert_held(&engine->i915->dev->struct_mutex);
if (list_empty(&req->list))
return;
}
static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
if (ret)
return ret;
}
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev, seqno - 1);
+ ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
}
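The seqno - 1 above keeps the HWS value behind what will be injected into the ring; throughout the driver, seqno ordering is compared with wrap-safe signed arithmetic (i915_seqno_passed). Restated as a self-contained helper:

#include <stdint.h>
#include <stdbool.h>

/* true if seq1 is at or after seq2, robust across u32 wrap-around */
static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}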
int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_init_seqno(dev, 0);
+ int ret = i915_gem_init_seqno(dev_priv, 0);
if (ret)
return ret;
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
- i915_queue_hangcheck(engine->dev);
+ i915_queue_hangcheck(engine->i915);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->i915, &req->seqno);
if (ret)
goto err;
int err;
if (ctx == NULL)
- ctx = to_i915(engine->dev)->kernel_context;
+ ctx = engine->i915->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
}
bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
bool idle = true;
/* Come back later if the device is busy... */
idle = false;
if (mutex_trylock(&dev->struct_mutex)) {
- idle = i915_gem_retire_requests(dev);
+ idle = i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
if (!idle)
if (i915_gem_request_completed(from_req, true))
return 0;
- if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
i915->mm.interruptible,
return;
if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
}
return 0;
if (ret)
goto err;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
i915_gem_stop_engines(dev);
i915_gem_context_lost(dev_priv);
else
dev_priv->num_fence_regs = 8;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN;
}
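As a side note, a standalone demo of what these alignments mean numerically when pinning the context object; ALIGN_POW2 is an illustrative stand-in for the kernel's round_up(), not a kernel macro:

#include <stdio.h>
#include <stddef.h>

/* power-of-two round-up, as the kernel's ALIGN()/round_up() do */
#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

#define GEN6_CONTEXT_ALIGN (64 << 10) /* 64 KiB, from the #define above */
#define GEN7_CONTEXT_ALIGN 4096

int main(void)
{
	size_t offset = 5000;

	printf("gen6: %zu\n", ALIGN_POW2(offset, (size_t)GEN6_CONTEXT_ALIGN));
	printf("gen7: %zu\n", ALIGN_POW2(offset, (size_t)GEN7_CONTEXT_ALIGN));
	return 0;
}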
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
- i915_gem_retire_requests(dev_priv->dev);
+ i915_gem_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->context_hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
* context.
*/
ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
- get_context_alignment(dev), 0);
+ get_context_alignment(to_i915(dev)), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
if (WARN_ON(dev_priv->kernel_context))
return 0;
- if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+ if (intel_vgpu_active(dev_priv) &&
+ HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
- } else if (HAS_HW_CONTEXTS(dev)) {
- dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+ } else if (HAS_HW_CONTEXTS(dev_priv)) {
+ dev_priv->hw_context_size =
+ round_up(get_context_size(dev_priv), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
+ struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(engine->dev) ?
- hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(dev_priv) ?
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
0;
int len, ret;
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(engine->dev)) {
+ if (IS_GEN6(dev_priv)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+ if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(engine->dev)->gen < 8)
+ else if (INTEL_GEN(dev_priv) < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(engine->dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
*/
intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
if (engine->id != RCS)
return true;
- if (INTEL_INFO(engine->dev)->gen < 8)
+ if (INTEL_GEN(engine->i915) < 8)
return true;
return false;
/* Trying to pin first makes error handling easier. */
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(engine->dev),
+ get_context_alignment(engine->i915),
0);
if (ret)
return ret;
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(to_i915(dev));
goto search_again;
}
if (ret)
return ret;
- i915_gem_retire_requests(vm->dev);
+ i915_gem_retire_requests(to_i915(vm->dev));
WARN_ON(!list_empty(&vm->active_list));
}
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
- bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+ bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry;
i915_gem_retire_requests_ring(engine);
}
if (flush_chipset)
- i915_gem_chipset_flush(req->engine->dev);
+ i915_gem_chipset_flush(req->engine->i915);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
.type = I915_GGTT_VIEW_ROTATED,
};
-int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt)
{
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
- has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+ has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+ has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+ has_full_48bit_ppgtt =
+ IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
has_full_ppgtt = false; /* emulation is too hard */
	if (!has_aliasing_ppgtt)
		return 0;
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
- if (enable_ppgtt == 0 && INTEL_INFO(dev)->gen < 9)
+ if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
return 0;
	if (enable_ppgtt == 1)
		return 1;
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
#endif
/* Early VLV doesn't have this */
- if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
- if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
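The sanitized value encodes the chosen PPGTT mode: 0 disabled, 1 aliasing, 2 full, 3 full with 48-bit addressing. A minimal sketch of a decoder, purely for illustration; the driver itself consumes the value through USES_PPGTT()-style macros rather than a helper like this:

static const char *ppgtt_mode_name(int enable_ppgtt)
{
	switch (enable_ppgtt) {
	case 0:  return "disabled";
	case 1:  return "aliasing";
	case 2:  return "full";
	case 3:  return "full-48bit";
	default: return "unknown";
	}
}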
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(vm->dev))
+ if (intel_vgpu_active(to_i915(vm->dev)))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(ppgtt->base.dev)) {
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(ppgtt->base.dev))
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
} else
BUG();
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
ppgtt->switch_mm = vgpu_mm_switch;
ret = gen6_ppgtt_alloc(ppgtt);
i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE;
- if (intel_vgpu_active(dev)) {
+ if (intel_vgpu_active(dev_priv)) {
ret = intel_vgt_balloon(dev);
if (ret)
return ret;
i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&ggtt->base.mm)) {
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
intel_vgt_deballoon();
drm_mm_takedown(&ggtt->base.mm);
#include "intel_renderstate.h"
static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
{
switch (gen) {
	case 6:
		return &gen6_null_state;
	}

	return NULL;
}
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+ struct drm_i915_private *dev_priv)
{
int ret;
- so->gen = INTEL_INFO(dev)->gen;
- so->rodata = render_state_get_rodata(dev, so->gen);
+ so->gen = INTEL_GEN(dev_priv);
+ so->rodata = render_state_get_rodata(so->gen);
if (so->rodata == NULL)
return 0;
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
- so->obj = i915_gem_object_create(dev, 4096);
+ so->obj = i915_gem_object_create(dev_priv->dev, 4096);
if (IS_ERR(so->obj))
return PTR_ERR(so->obj);
if (WARN_ON(engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, engine->dev);
+ ret = render_state_init(so, engine->i915);
if (ret)
return ret;
unsigned long count = 0;
trace_i915_gem_shrink(dev_priv, target, flags);
- i915_gem_retire_requests(dev_priv->dev);
+ i915_gem_retire_requests(dev_priv);
/*
 * Unbinding of objects will require HW access; Let us not wake the
 * device just to recover a little memory. If absolutely necessary,
 * we will force the wake during oom-notifier.
 */
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
- i915_gem_retire_requests(dev_priv->dev);
+ i915_gem_retire_requests(dev_priv);
return count;
}
return error_code;
}
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
- } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+ } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
}
struct intel_engine_cs *to;
enum intel_engine_id id;
- if (!i915_semaphore_is_enabled(dev_priv->dev))
+ if (!i915_semaphore_is_enabled(dev_priv))
return;
if (!error->semaphore_obj)
}
}
-static void i915_record_ring_state(struct drm_device *dev,
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
gen8_record_semaphore_state(dev_priv, error, engine,
ering);
else
gen6_record_semaphore_state(dev_priv, engine, ering);
}
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
ering->tail = I915_READ_TAIL(engine);
ering->ctl = I915_READ_CTL(engine);
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
i915_reg_t mmio;
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
		default:
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(engine->i915)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
ering->hangcheck_score = engine->hangcheck.score;
ering->hangcheck_action = engine->hangcheck.action;
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
int i;
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_INFO(dev)->gen >= 8)
+ else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i));
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */
}
}
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
error->ring[i].pid = -1;
- if (engine->dev == NULL)
+ if (!intel_engine_initialized(engine))
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, engine, &error->ring[i]);
+ i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
request = i915_gem_find_active_request(engine);
if (request) {
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- i915_get_extra_instdone(dev, error->extra_instdone);
+ i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
u32 engine_mask,
const char *error_msg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode;
int ring_id = -1, len;
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x",
- INTEL_INFO(dev)->gen, ring_id, ecode);
+ INTEL_GEN(dev_priv), ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg)
{
static bool warned;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
+ i915_gem_record_fences(dev_priv, error);
+ i915_gem_record_rings(dev_priv, error);
do_gettimeofday(&error->time);
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
+ error->overlay = intel_overlay_capture_error_state(dev_priv);
+ error->display = intel_display_capture_error_state(dev_priv);
- i915_error_capture_msg(dev, error, engine_mask, error_msg);
+ i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
warned = true;
}
}
}
/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+ uint32_t *instdone)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- if (IS_GEN2(dev) || IS_GEN3(dev))
+ if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+ else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
/*
 * Note that there's only one work item which does gpu resets, so we
 * need not worry about concurrent gpu resets potentially incrementing
 * error->reset_counter twice.
*/
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
- reset_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/*
 * In most cases it's guaranteed that we get here with an RPM
 * reference held, for example because there is a pending GPU
 * request that won't finish until the reset is done.
*/
intel_runtime_pm_get(dev_priv);
- intel_prepare_reset(dev);
+ intel_prepare_reset(dev_priv);
/*
 * All state reset _must_ be completed before we update the
 * reset counter, for otherwise waiters might miss the reset
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
- ret = i915_reset(dev);
+ ret = i915_reset(dev_priv);
- intel_finish_reset(dev);
+ intel_finish_reset(dev_priv);
intel_runtime_pm_put(dev_priv);
if (ret == 0)
- kobject_uevent_env(&dev->primary->kdev->kobj,
+ kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event);
}
}
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
int pipe, i;
pr_err("render error detected, EIR: 0x%08x\n", eir);
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
}
}
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
va_list args;
char error_msg[80];
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
- i915_capture_error_state(dev, engine_mask, error_msg);
- i915_report_and_clear_eir(dev);
+ i915_capture_error_state(dev_priv, engine_mask, error_msg);
+ i915_report_and_clear_eir(dev_priv);
if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
i915_error_wake_up(dev_priv, false);
}
- i915_reset_and_wakeup(dev);
+ i915_reset_and_wakeup(dev_priv);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
}
static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
{
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
- if (INTEL_INFO(dev_priv)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv) {
if (engine == signaller)
continue;
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+ if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
return NULL;
/*
 * HEAD is likely pointing to the dword after the actual command,
 * so scan backwards until we find the MBOX. But limit it to just 3
 * or 4 dwords depending on the semaphore wait command size.
 * Note that we don't care about ACTHD here since that might
 * point at a batch, and semaphores are always emitted into the
 * ringbuffer itself.
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
for (i = backwards; i; --i) {
return NULL;
*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
offset = ioread32(engine->buffer->virtual_start + head + 8);
static int semaphore_passed(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
u32 seqno;
if (engine->id != RCS)
return true;
- i915_get_extra_instdone(engine->dev, instdone);
+ i915_get_extra_instdone(engine->i915, instdone);
/* There might be unstable subunit states even when
 * actual head is not moving. Filter out the unstable ones by
 * accumulating the undone -> done transitions and only
 * consider those as progress.
 */
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
enum intel_ring_hangcheck_action ha;
u32 tmp;
if (ha != HANGCHECK_HUNG)
return ha;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return HANGCHECK_HUNG;
	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
- if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
static unsigned kick_waiters(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = to_i915(engine->dev);
+ struct drm_i915_private *i915 = engine->i915;
unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
if (engine->hangcheck.user_interrupts == user_interrupts &&
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
- struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int busy_count = 0, rings_hung = 0;
}
if (rings_hung) {
- i915_handle_error(dev, rings_hung, "Engine(s) hung");
+ i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
goto out;
}
if (busy_count)
/* Reset timer case chip hangs without another request
* being added */
- i915_queue_hangcheck(dev);
+ i915_queue_hangcheck(dev_priv);
out:
ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
-void i915_queue_hangcheck(struct drm_device *dev)
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
- struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+ struct i915_gpu_error *e = &dev_priv->gpu_error;
if (!i915.enable_hangcheck)
return;
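	/* A hedged sketch of the elided tail of this helper: requeue the
	 * hangcheck delayed work on the error workqueue. Field names are
	 * assumed from this era of the driver, not verified against this
	 * exact tree.
	 */
	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));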
),
TP_fast_assign(
- __entry->dev = from->dev->primary->index;
+ __entry->dev = from->i915->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->dev->primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->flags = flags;
- i915_trace_irq_get(engine, req);
+ i915_trace_irq_get(req->engine, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
),
TP_fast_assign(
- __entry->dev = req->engine->dev->primary->index;
+ __entry->dev = req->i915->dev->primary->index;
__entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->dev->primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
),
TP_fast_assign(
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine);
),
* less desirable.
*/
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->dev->primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->blocking =
- mutex_is_locked(&engine->dev->struct_mutex);
+ mutex_is_locked(&req->i915->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
__entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
}
}
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
return;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(dev_priv->dev);
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- intel_display_suspend(dev);
+ intel_display_suspend(dev_priv->dev);
}
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* Flips in the rings will be nuked by the reset,
 * so complete all pending flips so that user space
 * will get its events and not get stuck.
 */
intel_complete_page_flips(dev_priv);
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
/*
* Flips in the rings have been nuked by the reset,
 * so update the base address of all primary
 * planes to the last fb to make sure we're
 * showing the correct fb after a reset.
 *
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
- intel_update_primary_planes(dev);
+ intel_update_primary_planes(dev_priv->dev);
return;
}
intel_runtime_pm_disable_interrupts(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv->dev);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
+ intel_display_resume(dev_priv->dev);
intel_hpd_init(dev_priv);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(dev_priv->dev);
}
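For reference, after this series the reset path in i915_reset_and_wakeup() (earlier in this patch) pairs these helpers entirely on dev_priv; condensed from the hunks above, with error handling elided:

	intel_runtime_pm_get(dev_priv);
	intel_prepare_reset(dev_priv);
	ret = i915_reset(dev_priv);
	intel_finish_reset(dev_priv);
	intel_runtime_pm_put(dev_priv);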
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
if (engine == NULL)
return true;
- if (INTEL_INFO(engine->dev)->gen < 5)
+ if (INTEL_GEN(engine->i915) < 5)
return false;
if (i915.use_mmio_flip < 0)
};
struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int transcoders[] = {
	TRANSCODER_A,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
};
int i;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(dev_priv, i) {
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos = I915_READ(DSPPOS(i));
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
error->plane[i].addr = I915_READ(DSPADDR(i));
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
/* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
const struct drm_framebuffer *fb, int plane,
unsigned int pitch,
unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_i915_private *dev_priv);
bool enable_by_default = IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv);
- if (intel_vgpu_active(dev_priv->dev)) {
+ if (intel_vgpu_active(dev_priv)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+				    int enable_execlists)
{
/* On platforms with execlist available, vGPU will only
* support execlist mode, no ring buffer mode.
*/
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
return 1;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 1;
if (enable_execlists == 0)
return 0;
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+ USES_PPGTT(dev_priv) &&
i915.use_mmio_flip >= 0)
return 1;
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (IS_GEN8(dev) || IS_GEN9(dev))
+ if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
engine->idle_lite_restore_wa = ~0;
- engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+ engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
- engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+ engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
{
struct intel_engine_cs *engine = rq0->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = rq0->i915;
uint64_t desc[2];
if (rq1) {
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
- WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+ WARN_ON(!intel_irqs_enabled(engine->i915));
/* Try to read in pairs */
list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
u32 *context_id)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status;
read_pointer %= GEN8_CSB_ENTRIES;
static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status_pointer;
unsigned int read_pointer, write_pointer;
u32 csb[GEN8_CSB_ENTRIES][2];
struct drm_i915_gem_request *req, *tmp;
LIST_HEAD(cancel_list);
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
spin_lock_bh(&engine->execlist_lock);
list_replace_init(&engine->execlist_queue, &cancel_list);
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
	if (!intel_engine_initialized(engine))
		return;
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ringbuf = ctx->engine[engine->id].ringbuf;
- ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
if (ret)
goto unpin_map;
int ret, i;
struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(engine->dev)) {
+ if (IS_BROADWELL(engine->i915)) {
int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (rc < 0)
return rc;
uint32_t *offset)
{
int ret;
- struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
uint32_t *const batch,
uint32_t *offset)
{
- struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
}
/* WaClearTdlStateAckDirtyBits:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
}
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
{
int ret;
- engine->wa_ctx.obj = i915_gem_object_create(engine->dev,
+ engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
PAGE_ALIGN(size));
if (IS_ERR(engine->wa_ctx.obj)) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
WARN_ON(engine->id != RCS);
/* update this when WA for higher Gen are added */
- if (INTEL_INFO(engine->dev)->gen > 9) {
+ if (INTEL_GEN(engine->i915) > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_INFO(engine->dev)->gen);
+ INTEL_GEN(engine->i915));
return 0;
}
batch = kmap_atomic(page);
offset = 0;
- if (INTEL_INFO(engine->dev)->gen == 8) {
+ if (IS_GEN8(engine->i915)) {
ret = gen8_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
- } else if (INTEL_INFO(engine->dev)->gen == 9) {
+ } else if (IS_GEN9(engine->i915)) {
ret = gen9_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
static void lrc_init_hws(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
(u32)engine->status_page.gfx_addr);
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned int next_context_status_buffer_hw;
lrc_init_hws(engine);
static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_common_ring(engine);
if (req->ctx->ppgtt &&
(intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
- !intel_vgpu_active(req->i915->dev)) {
+ !intel_vgpu_active(req->i915)) {
ret = intel_logical_ring_emit_pdps(req);
if (ret)
return ret;
static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;
static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = request->i915;
uint32_t cmd;
int ret;
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(engine->dev))
+ if (IS_GEN9(request->i915))
vf_flush_wa = true;
}
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
tasklet_kill(&engine->irq_tasklet);
- dev_priv = engine->dev->dev_private;
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_logical_ring_stop(engine);
engine->ctx_desc_template = 0;
lrc_destroy_wa_ctx_obj(engine);
- engine->dev = NULL;
+ engine->i915 = NULL;
}
static void
engine->emit_bb_start = gen8_emit_bb_start;
engine->get_seqno = gen8_get_seqno;
engine->set_seqno = gen8_set_seqno;
- if (IS_BXT_REVID(engine->dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
engine->set_seqno = bxt_a_set_seqno;
}
engine->guc_id = info->guc_id;
engine->mmio_base = info->mmio_base;
- engine->dev = dev;
+ engine->i915 = dev_priv;
/* Intentionally left blank. */
engine->buffer = NULL;
logical_ring_default_irqs(engine, info->irq_shift);
intel_engine_init_hangcheck(engine);
- i915_gem_batch_pool_init(engine->dev, &engine->batch_pool);
+ i915_gem_batch_pool_init(dev, &engine->batch_pool);
return engine;
}
static int
logical_ring_init(struct intel_engine_cs *engine)
{
- struct intel_context *dctx = to_i915(engine->dev)->kernel_context;
+ struct intel_context *dctx = engine->i915->kernel_context;
int ret;
ret = i915_cmd_parser_init_ring(engine);
}
static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
{
u32 rpcs = 0;
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
/*
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev)->has_slice_pg) {
+ if (INTEL_INFO(dev_priv)->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->slice_total <<
+ rpcs |= INTEL_INFO(dev_priv)->slice_total <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_subslice_pg) {
+ if (INTEL_INFO(dev_priv)->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_eu_pg) {
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ if (INTEL_INFO(dev_priv)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
{
u32 indirect_ctx_offset;
- switch (INTEL_INFO(engine->dev)->gen) {
+ switch (INTEL_GEN(engine->i915)) {
default:
- MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+ MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
case 9:
indirect_ctx_offset =
struct intel_engine_cs *engine,
struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = ctx->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
void *vaddr;
u32 *reg_state;
RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- (HAS_RESOURCE_STREAMER(dev) ?
+ (HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
0);
if (engine->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev));
+ make_rpcs(dev_priv));
}
i915_gem_object_unpin_map(ctx_obj);
{
int ret = 0;
- WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+ WARN_ON(INTEL_GEN(engine->i915) < 8);
switch (engine->id) {
case RCS:
- if (INTEL_INFO(engine->dev)->gen >= 9)
+ if (INTEL_GEN(engine->i915) >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
static int execlists_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_object_create(dev, context_size);
+ ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
return PTR_ERR(ctx_obj);
struct intel_engine_cs *engine);
/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+ int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
*/
int intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;
struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
struct drm_i915_private *dev_priv = dev->dev_private;
/* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
return;
if (IS_IRONLAKE_M(dev)) {
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req, true))
- gen6_rps_boost(to_i915(req->engine->dev), NULL,
- req->emitted_jiffies);
+ gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
i915_gem_request_unreference(req);
kfree(boost);
bool intel_engine_stopped(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}
u32 flush_domains)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
cmd |= MI_EXE_FLUSH;
if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(dev) || IS_GEN5(dev)))
+ (IS_G4X(req->i915) || IS_GEN5(req->i915)))
cmd |= MI_INVALIDATE_ISP;
ret = intel_ring_begin(req, 2);
static void ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_TAIL(engine, value);
}
u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_INFO(engine->dev)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(engine->dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
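A note on the gen4+ branch above, inferred from the arithmetic itself rather than from documentation quoted in this patch: the shift folds bus-address bits [35:32] into bits [7:4] of HWS_PGA, so a status page allocated above 4GiB remains addressable:

/*
 * (busaddr >> 28) & 0xf0  ==  ((busaddr >> 32) & 0xf) << 4
 *
 * The page-aligned low 32 bits travel in addr itself; the next four
 * bits ride in the register's low-nibble extension field.
 */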
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush */
static bool stop_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
I915_WRITE_HEAD(engine, 0);
engine->write_tail(engine, 0);
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
static int init_ring_common(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_ringbuffer *ringbuf = engine->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
}
}
- if (I915_NEED_GFX_HWS(dev))
+ if (I915_NEED_GFX_HWS(dev_priv))
intel_ring_setup_status_page(engine);
else
ring_setup_phys_status_page(engine);
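The two setup paths selected above differ only in where the status page lives; summarized here as a sketch, assembled from the init_status_page() and init_phys_status_page() hunks later in this patch:

/*
 * I915_NEED_GFX_HWS set:   the status page is a GEM object
 *   (i915_gem_object_create() in init_status_page(), pinned low when
 *   !HAS_LLC) and programmed via intel_ring_setup_status_page().
 * I915_NEED_GFX_HWS clear: the status page is a contiguous DMA page
 *   (drm_pci_alloc() in init_phys_status_page()) and programmed via
 *   ring_setup_phys_status_page().
 */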
void
intel_fini_pipe_control(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
-
if (engine->scratch.obj == NULL)
return;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(engine->i915) >= 5) {
kunmap(sg_page(engine->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(engine->scratch.obj);
}
WARN_ON(engine->scratch.obj);
- engine->scratch.obj = i915_gem_object_create(engine->dev, 4096);
+ engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
if (IS_ERR(engine->scratch.obj)) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = PTR_ERR(engine->scratch.obj);
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- int ret, i;
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
+ int ret, i;
if (w->count == 0)
return 0;
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[engine->id];
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t tmp;
int ret;
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) ||
+ IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
/* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
/* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) {
/*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
}
/* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
STALL_DOP_GATING_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
}
/* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
int init_workarounds_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
return bdw_init_workarounds(engine);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return chv_init_workarounds(engine);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
return skl_init_workarounds(engine);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return bxt_init_workarounds(engine);
return 0;
static int init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret = init_ring_common(engine);
if (ret)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) >= 4 && INTEL_GEN(dev_priv) < 7)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (HAS_L3_DPF(dev))
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+ if (HAS_L3_DPF(dev_priv))
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
return init_workarounds_ring(engine);
}
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (dev_priv->semaphore_obj) {
i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
{
#define MBOX_UPDATE_DWORDS 8
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
{
#define MBOX_UPDATE_DWORDS 6
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
unsigned int num_dwords)
{
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *useless;
enum intel_engine_id id;
int ret, num_rings;
#define MBOX_UPDATE_DWORDS 3
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
return 0;
}
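Each signal variant above reserves ring space per waiter via MBOX_UPDATE_DWORDS; a quick worked count (the ring_mask value is assumed for illustration):

/*
 * Assuming a 4-engine ring_mask, num_rings = 4, so 3 waiters:
 *   gen8 RCS signal: 8 dwords/waiter -> num_dwords += 3 * 8 = 24
 *   gen8 XCS signal: 6 dwords/waiter -> num_dwords += 3 * 6 = 18
 *   gen6 signal:     3 dwords/waiter, padded to an even count
 *                    -> num_dwords += round_up(3 * 3, 2) = 10
 */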
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->last_seqno < seqno;
}
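The strict comparison above is the wrap test applied just below; the reasoning, as a hedged note:

/*
 * dev_priv->last_seqno is the most recently allocated seqno, so a
 * target seqno numerically above it can only predate a wrap. The
 * hardware mailbox comparison is monotonic and such a wait could
 * never complete, hence the waiter skips the MBOX wait (the
 * likely(!i915_gem_has_seqno_wrapped(...)) branch below) and pads
 * with no-ops instead.
 */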
u32 seqno)
{
struct intel_engine_cs *waiter = waiter_req->engine;
- struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ struct drm_i915_private *dev_priv = waiter_req->i915;
struct i915_hw_ppgtt *ppgtt;
int ret;
return ret;
/* If seqno wrap happened, omit the wait with no-ops */
- if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
intel_ring_emit(waiter, dw1 | wait_mbox);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
static bool
gen5_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
static void
gen5_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
static bool
i9xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (!intel_irqs_enabled(dev_priv))
static void
i9xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
static bool
i8xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (!intel_irqs_enabled(dev_priv))
static void
i8xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
static bool
gen6_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
- GT_PARITY_ERROR(dev)));
+ GT_PARITY_ERROR(dev_priv)));
else
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
static void
gen6_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
else
I915_WRITE_IMR(engine, ~0);
gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
static bool
hsw_vebox_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
static void
hsw_vebox_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
static bool
gen8_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
static void
gen8_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
I915_WRITE_IMR(engine,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
} else {
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah)
return;
- drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+ drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
engine->status_page.page_addr = NULL;
}
unsigned flags;
int ret;
- obj = i915_gem_object_create(engine->dev, 4096);
+ obj = i915_gem_object_create(engine->i915->dev, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
goto err_unref;
flags = 0;
- if (!HAS_LLC(engine->dev))
+ if (!HAS_LLC(engine->i915))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
- drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
ringbuf->vma = NULL;
}
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->dev) || IS_845G(engine->dev))
+ if (IS_I830(engine->i915) || IS_845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
intel_ring_update_space(ring);
- ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+ ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(engine->buffer);
- engine->dev = dev;
+ engine->i915 = dev_priv;
INIT_LIST_HEAD(&engine->active_list);
INIT_LIST_HEAD(&engine->request_list);
INIT_LIST_HEAD(&engine->execlist_queue);
}
engine->buffer = ringbuf;
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
ret = init_status_page(engine);
if (ret)
goto error;
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
engine->name, ret);
if (!intel_engine_initialized(engine))
return;
- dev_priv = to_i915(engine->dev);
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_stop_engine(engine);
- WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_unpin_ringbuffer_obj(engine->buffer);
intel_ringbuffer_free(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(engine->dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
cleanup_status_page(engine);
} else {
WARN_ON(engine->id != RCS);
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- engine->dev = NULL;
+ engine->i915 = NULL;
}
int intel_engine_idle(struct intel_engine_cs *engine)
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
* so long as the semaphore value in the register/page is greater
static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
/* Every tail move must follow the sequence below */
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
u32 invalidate, u32 flush)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
engine->hw_id = 0;
engine->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (i915_semaphore_is_enabled(dev)) {
+ if (INTEL_GEN(dev_priv) >= 8) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
obj = i915_gem_object_create(dev, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
WARN_ON(!dev_priv->semaphore_obj);
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_rcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
}
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->add_request = gen6_add_request;
engine->flush = gen7_render_ring_flush;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
engine->flush = gen6_render_ring_flush;
engine->irq_get = gen6_ring_get_irq;
engine->irq_put = gen6_ring_put_irq;
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
engine->semaphore.signal = gen6_signal;
/*
engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
- } else if (IS_GEN5(dev)) {
+ } else if (IS_GEN5(dev_priv)) {
engine->add_request = pc_render_add_request;
engine->flush = gen4_render_ring_flush;
engine->get_seqno = pc_render_get_seqno;
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
engine->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
engine->flush = gen2_render_ring_flush;
else
engine->flush = gen4_render_ring_flush;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
engine->irq_get = i8xx_ring_get_irq;
engine->irq_put = i8xx_ring_put_irq;
} else {
}
engine->write_tail = ring_write_tail;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev_priv))
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
engine->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
+ else if (IS_I830(dev_priv) || IS_845G(dev_priv))
engine->dispatch_execbuffer = i830_dispatch_execbuffer;
else
engine->dispatch_execbuffer = i915_dispatch_execbuffer;
engine->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
- if (HAS_BROKEN_CS_TLB(dev)) {
+ if (HAS_BROKEN_CS_TLB(dev_priv)) {
obj = i915_gem_object_create(dev, I830_WA_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate batch bo\n");
if (ret)
return ret;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
ret = intel_init_pipe_control(engine);
if (ret)
return ret;
engine->hw_id = 1;
engine->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
engine->write_tail = gen6_bsd_ring_write_tail;
engine->flush = gen6_bsd_ring_flush;
engine->add_request = gen6_add_request;
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
engine->irq_put = gen6_ring_put_irq;
engine->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
engine->semaphore.signal = gen6_signal;
engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
engine->add_request = i9xx_add_request;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
engine->irq_get = gen5_ring_get_irq;
engine->irq_put = gen5_ring_put_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
engine->irq_get = gen6_ring_get_irq;
engine->irq_put = gen6_ring_put_irq;
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.signal = gen6_signal;
engine->semaphore.sync_to = gen6_ring_sync;
/*
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
engine->irq_get = hsw_vebox_get_irq;
engine->irq_put = hsw_vebox_put_irq;
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
engine->semaphore.signal = gen6_signal;
engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
struct drm_i915_gem_object *obj;
};
-struct  intel_engine_cs {
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
const char *name;
enum intel_engine_id {
RCS = 0,
unsigned int hw_id;
unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
- struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct list_head buffers;
static inline bool
intel_engine_initialized(struct intel_engine_cs *engine)
{
- return engine->dev != NULL;
+ return engine->i915 != NULL;
}
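The helper above is why cleanup ends by clearing the pointer: the backpointer doubles as the engine's initialized flag. The full lifecycle, assembled from hunks in this patch:

engine->i915 = dev_priv;	/* set once in intel_init_ring_buffer() */

if (!intel_engine_initialized(engine))	/* i.e. engine->i915 == NULL */
	return;				/* nothing to clean up */

engine->i915 = NULL;	/* cleared at the end of cleanup, re-arming the test */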
static inline unsigned
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
break;
}
- if (intel_vgpu_active(dev)) {
+ if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
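The RESET_CTL writes above use the driver's masked-register idiom, which the WA_SET_BIT_MASKED workaround writes in this patch share; stated here as background on the standard i915 convention:

/*
 * Masked registers carry a write-enable mask in their upper 16 bits;
 * only bits whose mask bit is set are updated. The helpers expand to
 *   _MASKED_BIT_ENABLE(a)  == (a << 16) | a
 *   _MASKED_BIT_DISABLE(a) == (a << 16)
 * so gen8_request_engine_reset() sets RESET_CTL_REQUEST_RESET and
 * gen8_unrequest_engine_reset() clears it, with no read-modify-write.
 */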
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
break;