From: Ben Widawsky
Date: Fri, 6 Dec 2013 22:11:11 +0000 (-0800)
Subject: drm/i915: Use LRI for switching PP_DIR_BASE
X-Git-Tag: MMI-PSA29.97-13-9~12412^2~62^2~80^2~30
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=48a10389c82df842658c5d2560768eb674b71258;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

drm/i915: Use LRI for switching PP_DIR_BASE

The docs seem to suggest this is the appropriate method (though they
don't say so outright). In other words, we probably should have done
this before. We certainly must do this for switching VMs on the fly,
since synchronizing the rings to MMIO updates isn't acceptable.

v2: Make the reset code actually work for all rings. Note that this was
fixed in subsequent commits, but was indeed broken for this commit.

Add a posting read to the reset case. It probably should have existed
beforehand, but since we have no failures, there is no reason to make
it a separate commit.

Make the IS_GEN6 path not use the ring, because I am seeing crashes
when using it. It is a bit of a hack in this patch; it will get fixed
up in a couple of patches.

Signed-off-by: Ben Widawsky
Signed-off-by: Daniel Vetter
---
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bf6abf14f04a..08a706d0a737 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -486,6 +486,50 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 	return (ppgtt->pd_offset / 64) << 16;
 }
 
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct intel_ring_buffer *ring,
+			  bool synchronous)
+{
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* If we're in reset, we can assume the GPU is sufficiently idle to
+	 * manually frob these bits. Ideally we could use the ring functions,
+	 * except our error handling makes it quite difficult (can't use
+	 * intel_ring_begin, ring->flush, or intel_ring_advance)
+	 *
+	 * FIXME: We should try not to special case reset
+	 */
+	if (synchronous ||
+	    i915_reset_in_progress(&dev_priv->gpu_error)) {
+		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+		POSTING_READ(RING_PP_DIR_BASE(ring));
+		return 0;
+	}
+
+	/* NB: TLBs must be flushed and invalidated before a switch */
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct intel_ring_buffer *ring,
 			  bool synchronous)
@@ -493,6 +537,9 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (!synchronous)
+		return 0;
+
 	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 
@@ -712,13 +759,14 @@ alloc:
 
 	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
 	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev)) {
 		ppgtt->enable = gen6_ppgtt_enable;
-	if (IS_GEN7(dev))
+		ppgtt->switch_mm = gen6_mm_switch;
+	} else if (IS_GEN7(dev)) {
 		ppgtt->enable = gen7_ppgtt_enable;
-	else
+		ppgtt->switch_mm = gen7_mm_switch;
+	} else
 		BUG();
-	ppgtt->switch_mm = gen6_mm_switch;
 	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
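
A note on the mechanism, not part of the patch: the runtime path in
gen7_mm_switch() above emits an MI_LOAD_REGISTER_IMM (LRI) packet into the
ring, i.e. one header dword announcing how many register writes follow, then
one (register offset, value) pair per write, padded with MI_NOOP to an even
dword count, which is why intel_ring_begin() reserves 6 dwords for the two
PP_DIR registers. The sketch below only illustrates that packet layout;
sketch_emit_lri() and its regs/values parameters are hypothetical names, and
it deliberately omits the TLB flush the real path performs before the switch.

static int sketch_emit_lri(struct intel_ring_buffer *ring,
			   const u32 *regs, const u32 *values, int count)
{
	/* LRI header plus one (offset, value) pair per register */
	int n = 1 + 2 * count;
	/* pad to an even number of dwords, as gen7_mm_switch() does */
	int total = ALIGN(n, 2);
	int i, ret;

	ret = intel_ring_begin(ring, total);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(count));
	for (i = 0; i < count; i++) {
		intel_ring_emit(ring, regs[i]);
		intel_ring_emit(ring, values[i]);
	}
	if (total != n)
		intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

With count == 2, regs[] = { RING_PP_DIR_DCLV(ring), RING_PP_DIR_BASE(ring) }
and values[] = { PP_DIR_DCLV_2G, get_pd_offset(ppgtt) }, this reserves the
same 6 dwords as the hunk above. Going through the ring keeps the PP_DIR_BASE
update ordered with the commands already queued on that ring, whereas the
MMIO writes are confined to the synchronous/reset case where the GPU can be
assumed idle.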