* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_GEN9_BC(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
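/*
 * Editor's note: IS_GEN9_LP() covers both low-power gen9 parts, Broxton
 * and Geminilake, which is why switching the last branch from
 * IS_BROXTON() to IS_GEN9_LP() extends the BXT default TLB value to glk.
 * A rough sketch of the grouping, not the driver's actual definition:
 *
 *	IS_GEN9_LP(p) behaves like IS_BROXTON(p) || IS_GEMINILAKE(p)
 */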
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (ret < 0)
return ret;
index = ret;
- /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
wa_ctx_emit(batch, index, 0);
}
- /* WaMediaPoolStateCmdInWABB:bxt */
+ /* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
* EU pool configuration is setup along with golden context
"Platform that should have a MOCS table does not.\n");
}
- /* WaDisableSkipCaching:skl,bxt,kbl */
+ /* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN9(dev_priv)) {
int i;
I915_WRITE(GEN8_CONFIG0,
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
- /* WaEnableChickenDCPR:skl,bxt,kbl */
+ /* WaEnableChickenDCPR:skl,bxt,kbl,glk */
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
- /* WaFbcWakeMemOn:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl,glk */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS |
DISP_FBC_MEMORY_WAKE);
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
+static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ gen9_init_clock_gating(dev_priv);
+
+ /*
+ * WaDisablePWMClockGating:glk
+ * Backlight PWM may stop in the asserted state, causing backlight
+ * to stay fully on.
+ */
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ PWM1_GATING_DIS | PWM2_GATING_DIS);
+}
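/*
 * Editor's sketch of how this new hook gets exercised: the per-platform
 * function is stored in dev_priv->display.init_clock_gating (see the
 * assignments further below) and invoked from the driver's common
 * clock-gating init path, roughly like this (simplified, hypothetical
 * caller name):
 */
static void example_call_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* on Geminilake this dispatches to glk_init_clock_gating() */
	dev_priv->display.init_clock_gating(dev_priv);
}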
+
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
}
/*
- * WaWmMemoryReadLatency:skl
+ * WaWmMemoryReadLatency:skl,glk
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from the
* punit when level 0 response data is 0us.
*/
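/*
 * Editor's sketch of the adjustment described above (simplified; it
 * assumes the read latencies land in a uint16_t wm[] array of
 * microsecond values for levels 0..max_level, not the driver's exact
 * unrolled code):
 */
if (wm[0] == 0) {
	int level;

	/* punit reported 0us for level 0: bump every level by 2us */
	for (level = 0; level <= max_level; level++)
		wm[level] += 2;
}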
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+ else if (IS_GEMINILAKE(dev_priv))
+ dev_priv->display.init_clock_gating = glk_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
struct drm_i915_private *dev_priv = engine->i915;
int ret;
- /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
- /* WaDisablePartialInstShootdown:skl,bxt,kbl */
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_GPGPU_PREEMPTION);
- /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
/* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
- /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
if (ret)
return ret;
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
return 0;
}
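/*
 * Editor's note on the WA_SET_BIT_MASKED() / _MASKED_BIT_DISABLE()
 * helpers used in these workarounds: gen8+ "masked" registers take a
 * write-enable mask in the upper 16 bits and the value in the lower 16
 * bits, so individual bits can be set or cleared without a
 * read-modify-write. A rough sketch of the encoding with hypothetical
 * names, not the driver's exact macros:
 */
#define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)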
+static int glk_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaToEnableHwFixForPushConstHWBug:glk */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
int init_workarounds_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
if (IS_KABYLAKE(dev_priv))
return kbl_init_workarounds(engine);
+ if (IS_GEMINILAKE(dev_priv))
+ return glk_init_workarounds(engine);
+
return 0;
}