/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
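
/*
 * A minimal sketch of how these flags compose (hypothetical mask, not taken
 * from any real BIOS table): a platform allowing RC6 and deep RC6 but not
 * RC6pp would use
 *
 *	u32 rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 */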
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
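/*
 * Expansion shown for illustration: FW_WM(wm, SR) becomes
 *
 *	(((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK)
 *
 * i.e. the watermark value shifted into the SR field of a DSPFW register
 * and clamped to that field's mask.
 */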
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	dev_priv->wm.vlv.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
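/*
 * The FIFO start offsets are 9-bit values whose low 8 bits live in DSPARB
 * and whose 9th bit lives in DSPARB2. As an illustration,
 * VLV_FIFO_START(dsparb, dsparb2, 8, 4) expands to
 *
 *	((((dsparb) >> 8) & 0xff) | ((((dsparb2) >> 4) & 0x1) << 8))
 *
 * recovering sprite1's start offset for pipe A below.
 */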
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;

	DRM_DEBUG_KMS("Pipe %c FIFO size: %d/%d/%d/%d\n",
		      pipe_name(pipe),
		      fifo_state->plane[PLANE_PRIMARY],
		      fifo_state->plane[PLANE_SPRITE0],
		      fifo_state->plane[PLANE_SPRITE1],
		      fifo_state->plane[PLANE_CURSOR]);
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
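
/*
 * Worked example with made-up but representative numbers: a 148500 kHz
 * pixel clock, 4 bytes per pixel and the 5 usec pessimal latency above
 * give entries_required = (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes,
 * i.e. DIV_ROUND_UP(2960, 64) = 47 cachelines of 64 bytes. With a
 * hypothetical 96 entry FIFO and a guard size of 2 the watermark would be
 * 96 - (47 + 2) = 47.
 */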
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;
	else
		return plane_state->base.visible;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!plane_state->base.visible)
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct vlv_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
static int vlv_num_wm_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < vlv_num_wm_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = vlv_num_wm_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool vlv_plane_wm_compute(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = vlv_num_wm_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!plane_state->base.visible) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		DRM_DEBUG_KMS("%s wms: [0]=%d,[1]=%d,[2]=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}
static bool vlv_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				  enum plane_id plane_id, int level)
{
	const struct vlv_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}
static bool vlv_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (vlv_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			to_intel_crtc_state(crtc->base.state);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = vlv_num_wm_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

		if (!vlv_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
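/*
 * As with FW_WM(), this is a field-packing helper. For illustration,
 * VLV_FIFO(SPRITEA, sprite0_start) expands to
 *
 *	(((sprite0_start) << DSPARB_SPRITEA_SHIFT_VLV) & DSPARB_SPRITEA_MASK_VLV)
 */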
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&dev_priv->uncore.lock);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ_FW(DSPARB3);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB3, dsparb3);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	POSTING_READ_FW(DSPARB);

	spin_unlock(&dev_priv->uncore.lock);
}
#undef VLV_FIFO

static int vlv_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
	const struct vlv_wm_state *active = &crtc->wm.active.vlv;
	int level;

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = dev_priv->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
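/*
 * is_power_of_2() used as a popcount == 1 test: a mask of BIT(PIPE_A)
 * counts as a single enabled pipe, BIT(PIPE_A) | BIT(PIPE_B) does not,
 * and an empty mask also fails (is_power_of_2(0) is false).
 */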
static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
1933 /* latency must be in 0.1us units. */
1934 static uint32_t ilk_wm_method2(uint32_t pixel_rate
, uint32_t pipe_htotal
,
1935 uint32_t horiz_pixels
, uint8_t cpp
,
1940 if (WARN(latency
== 0, "Latency value missing\n"))
1942 if (WARN_ON(!pipe_htotal
))
1945 ret
= (latency
* pixel_rate
) / (pipe_htotal
* 10000);
1946 ret
= (ret
+ 1) * horiz_pixels
* cpp
;
1947 ret
= DIV_ROUND_UP(ret
, 64) + 2;
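/*
 * Worked example for the two methods above (illustrative numbers, not from
 * any spec): a 1920-wide plane with htotal = 2200 at pixel_rate = 148500 kHz,
 * cpp = 4, latency = 7 (0.7 us):
 *
 *   method1: DIV_ROUND_UP(148500 * 4 * 7, 64 * 10000) + 2
 *          = DIV_ROUND_UP(4158000, 640000) + 2 = 7 + 2 = 9 cachelines
 *
 *   method2: lines = (7 * 148500) / (2200 * 10000) = 0, so one whole line
 *            is charged: DIV_ROUND_UP(1 * 1920 * 4, 64) + 2 = 120 + 2
 *          = 122 cachelines
 *
 * method1 charges for the pixels drained during the latency window,
 * method2 for the whole lines touched during that window.
 */
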
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible.  But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

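/*
 * Illustrative arithmetic for ilk_wm_fbc() (assumed numbers): the FBC
 * watermark re-expresses the primary watermark in lines.  With
 * pri_val = 122 cachelines and a 1920-wide plane at cpp = 4, one line is
 * 1920 * 4 = 7680 bytes, so:
 *
 *   DIV_ROUND_UP(122 * 64, 7680) + 2 = 2 + 2 = 4 lines
 */
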
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	uint32_t method1, method2;
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_method2(cstate->pixel_rate,
			      cstate->base.adjusted_mode.crtc_htotal,
			      pstate->base.crtc_w, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}

static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}

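/*
 * Worked example (assumed single-pipe ILK config): fifo_size starts at 512.
 * For an LP level with sprites enabled and 5:6 partitioning the sprite gets
 * 512 * 5 / 6 = 426 entries, clamped to its 255 register max, while the
 * primary gets 512 / 6 = 85 entries, well under its 511 max.  With 1:1
 * partitioning both get 512 / 2 = 256 (only the sprite clamps, to 255).
 */
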
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(to_i915(dev), level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}

static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
		return 0;

	/* The WM are computed with base on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk.logical.cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}

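/*
 * Worked example (assumed mode): htotal = 2200 at crtc_clock = 148500 kHz
 * gives a line time of 2200 / 148500 kHz = 14.81 us.  In the register's
 * 0.125 us units: DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119.
 * ips_linetime is the same calculation run against the logical cdclk.
 */
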
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  uint16_t wm[8])
{
	if (IS_GEN9(dev_priv)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl,glk
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from the
		 * punit when level 0 response data us 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}

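/*
 * Sketch of the gen9 decode above (the raw value here is made up): each
 * pcode read packs four 8-bit latency levels, so val = 0x0e0a0604 unpacks
 * via the LEVEL_1_5/2_6/3_7 shifts (8, 16 and 24 bits) into
 * wm[0..3] (or wm[4..7]) = 4, 6, 10, 14 us.
 */
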
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev_priv))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}

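/*
 * The floor of 12 above translates to 1.2 us for WM0 (stored in 0.1 us
 * units) and, since WM1+ values are stored in 0.5 us units, to
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us, for the higher levels.
 */
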
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev_priv))
		snb_wm_latency_quirk(dev_priv);
}

static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->base.visible;
		pipe_wm->sprites_scaled = sprstate->base.visible &&
			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state.  These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(to_i915(dev));

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
		newstate->wm.need_postvbl_update = true;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

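/*
 * Worked mapping (HSW/BDW, five levels 0-4): if wm[4] is enabled the pipe
 * has a usable WM4, so LP1/LP2/LP3 map to levels 1/3/4 and level 2 is
 * skipped; otherwise they map straight to levels 1/2/3.
 */
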
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(to_i915(dev));
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

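/*
 * Resulting bit layout of the dirty mask (from the macros above):
 * bits 0-2 pipe WM0, bits 8-10 linetime, bits 16-18 LP1-LP3,
 * bit 24 FBC, bit 25 DDB partitioning.
 */
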
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

#define SKL_SAGV_BLOCK_TIME	30 /* µs */

/*
 * FIXME: We still don't have the proper code detect if we need to apply the WA,
 * so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
		return true;

	return false;
}

static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	if (IS_KABYLAKE(dev_priv))
		return true;

	if (IS_SKYLAKE(dev_priv) &&
	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
		return true;

	return false;
}

/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	DRM_DEBUG_KMS("Enabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for the SAGV when enabling */
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}

int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	DRM_DEBUG_KMS("Disabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}

bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *cstate;
	enum pipe pipe;
	int level, latency;

	if (!intel_has_sagv(dev_priv))
		return false;

	/*
	 * SKL workaround: bspec recommends we disable the SAGV when we have
	 * more then one pipe enabled
	 *
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight32(intel_state->active_crtcs) == 0)
		return true;
	else if (hweight32(intel_state->active_crtcs) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(intel_state->active_crtcs) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	cstate = to_intel_crtc_state(crtc->base.state);

	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&cstate->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
			{ }

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(intel_state) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If any of the planes on this pipe don't enable wm levels
		 * that incur memory latencies higher then 30µs we can't enable
		 * the SAGV
		 */
		if (latency < SKL_SAGV_BLOCK_TIME)
			return false;
	}

	return true;
}

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
	WARN_ON(ddb_size == 0);

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged.  Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}

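/*
 * Worked example (assumed SKL-style numbers): with a 896-block DDB, 892
 * blocks remain after the 4-block bypass reservation.  Two active pipes
 * split that evenly: pipe_size = 892 / 2 = 446, so the first pipe gets
 * [0, 446) and the second [446, 892).
 */
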
static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	struct intel_crtc *crtc;

	memset(ddb, 0, sizeof(*ddb));

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum intel_display_power_domain power_domain;
		enum plane_id plane_id;
		enum pipe pipe = crtc->pipe;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			u32 val;

			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
			else
				val = I915_READ(CUR_BUF_CFG(pipe));

			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
		}

		intel_display_power_put(dev_priv, power_domain);
	}
}

/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint32_t
skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
			   const struct intel_plane_state *pstate)
{
	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
	uint32_t downscale_h, downscale_w;
	uint32_t src_w, src_h, dst_w, dst_h;

	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return DRM_PLANE_HELPER_NO_SCALING;

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	if (plane->id == PLANE_CURSOR) {
		src_w = pstate->base.src_w;
		src_h = pstate->base.src_h;
		dst_w = pstate->base.crtc_w;
		dst_h = pstate->base.crtc_h;
	} else {
		src_w = drm_rect_width(&pstate->base.src);
		src_h = drm_rect_height(&pstate->base.src);
		dst_w = drm_rect_width(&pstate->base.dst);
		dst_h = drm_rect_height(&pstate->base.dst);
	}

	if (drm_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);

	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

	/* Provide result in 16.16 fixed point */
	return (uint64_t)downscale_w * downscale_h >> 16;
}

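/*
 * 16.16 worked example (assumed sizes): a 3840x2160 source scanned out at
 * 1920x1080 gives downscale_w = downscale_h = 2.0, i.e. 0x20000 each, and
 * 0x20000 * 0x20000 >> 16 = 0x40000: a total downscale of 4.0 in 16.16
 * fixed point.  An upscale clamps at DRM_PLANE_HELPER_NO_SCALING (1.0).
 */
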
static uint32_t
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane *plane = to_intel_plane(pstate->plane);
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t down_scale_amount, data_rate;
	uint32_t width = 0, height = 0;
	struct drm_framebuffer *fb;
	u32 format;

	if (!intel_pstate->base.visible)
		return 0;

	fb = pstate->fb;
	format = fb->format->format;

	if (plane->id == PLANE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)  /* y-plane data rate */
			data_rate = width * height *
				fb->format->cpp[0];
		else    /* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				fb->format->cpp[1];
	} else {
		/* for packed formats */
		data_rate = width * height * fb->format->cpp[0];
	}

	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);

	return (uint64_t)data_rate * down_scale_amount >> 16;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192  * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
				 unsigned *plane_data_rate,
				 unsigned *plane_y_data_rate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	unsigned int total_data_rate = 0;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
		enum plane_id plane_id = to_intel_plane(plane)->id;
		unsigned int rate;

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		plane_data_rate[plane_id] = rate;

		total_data_rate += rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		plane_y_data_rate[plane_id] = rate;

		total_data_rate += rate;
	}

	return total_data_rate;
}

static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  const int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->format->format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 8;

	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);

	/* Halve UV plane width and height for NV12 */
	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->format->format == DRM_FORMAT_NV12 && !y)
		plane_bpp = fb->format->cpp[1];
	else
		plane_bpp = fb->format->cpp[0];

	if (drm_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}

static void
skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
		 uint16_t *minimum, uint16_t *y_minimum)
{
	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (plane_id == PLANE_CURSOR)
			continue;

		if (!pstate->visible)
			continue;

		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
	}

	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
}

static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
	uint16_t alloc_size, start;
	uint16_t minimum[I915_MAX_PLANES] = {};
	uint16_t y_minimum[I915_MAX_PLANES] = {};
	unsigned int total_data_rate;
	enum plane_id plane_id;
	int num_active;
	unsigned plane_data_rate[I915_MAX_PLANES] = {};
	unsigned plane_y_data_rate[I915_MAX_PLANES] = {};

	/* Clear the partitioning for disabled planes. */
	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}

	skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);

	/*
	 * 1. Allocate the mininum required blocks for each active plane
	 * and allocate the cursor, it doesn't require extra allocation
	 * proportional to the data rate.
	 */

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		alloc_size -= minimum[plane_id];
		alloc_size -= y_minimum[plane_id];
	}

	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate,
							   plane_data_rate,
							   plane_y_data_rate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;

		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate = plane_data_rate[plane_id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[plane_id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][plane_id].start = start;
			ddb->plane[pipe][plane_id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		y_data_rate = plane_y_data_rate[plane_id];

		y_plane_blocks = y_minimum[plane_id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][plane_id].start = start;
			ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}

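/*
 * Worked allocation example (made-up rates): with alloc_size = 400 blocks
 * left after the minimums, a primary contributing 3/4 of total_data_rate
 * and a sprite 1/4, the primary receives its minimum plus
 * div_u64(400 * 3, 4) = 300 extra blocks and the sprite its minimum plus
 * 100, packed back-to-back starting at alloc->start.
 */
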
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
					 uint32_t latency)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);
	return ret;
}

static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
					 uint32_t pipe_htotal,
					 uint32_t latency,
					 uint_fixed_16_16_t plane_blocks_per_line)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}

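/*
 * Worked comparison of the two gen9 methods (illustrative numbers):
 * pixel_rate = 148500 kHz, cpp = 4, latency = 5 us gives
 * method1 = 5 * 148500 * 4 / (1000 * 512) = ~5.8 blocks, while with
 * htotal = 2200 method2 charges DIV_ROUND_UP(5 * 148500, 2200 * 1000) = 1
 * whole line times plane_blocks_per_line.  Both land in 16.16 fixed point.
 */
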
static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = cstate->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(cstate, pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}

static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t selected_result;
	uint32_t interm_pbpl;
	uint32_t plane_bytes_per_line;
	uint32_t res_blocks, res_lines;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;
	uint_fixed_16_16_t y_tile_minimum;
	uint32_t y_min_scanlines;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
	bool y_tiled, x_tiled;

	if (latency == 0 ||
	    !intel_wm_plane_visible(cstate, intel_pstate)) {
		*enabled = false;
		return 0;
	}

	y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		  fb->modifier == I915_FORMAT_MOD_Yf_TILED;
	x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;

	/* Display WA #1141: kbl. */
	if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
		latency += 4;

	if (apply_memory_bw_wa && x_tiled)
		latency += 15;

	if (plane->id == PLANE_CURSOR) {
		width = intel_pstate->base.crtc_w;
		height = intel_pstate->base.crtc_h;
	} else {
		width = drm_rect_width(&intel_pstate->base.src) >> 16;
		height = drm_rect_height(&intel_pstate->base.src) >> 16;
	}

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = fb->format->cpp[0];
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	if (drm_rotation_90_or_270(pstate->rotation)) {
		int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
			fb->format->cpp[1] :
			fb->format->cpp[0];

		switch (cpp) {
		case 1:
			y_min_scanlines = 16;
			break;
		case 2:
			y_min_scanlines = 8;
			break;
		case 4:
			y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(cpp);
			return -EINVAL;
		}
	} else {
		y_min_scanlines = 4;
	}

	if (apply_memory_bw_wa)
		y_min_scanlines *= 2;

	plane_bytes_per_line = width * cpp;
	if (y_tiled) {
		interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
					   y_min_scanlines, 512);
		plane_blocks_per_line =
		      fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
	} else if (x_tiled) {
		interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
	} else {
		interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
		plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
	}

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 latency,
				 plane_blocks_per_line);

	y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
					     plane_blocks_per_line);

	if (y_tiled) {
		selected_result = max_fixed_16_16(method2, y_tile_minimum);
	} else {
		if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
		    (plane_bytes_per_line / 512 < 1))
			selected_result = method2;
		else if ((ddb_allocation /
			fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
			selected_result = min_fixed_16_16(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
	res_lines = DIV_ROUND_UP(selected_result.val,
				 plane_blocks_per_line.val);

	if (level >= 1 && level <= 7) {
		if (y_tiled) {
			res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
			res_lines += y_min_scanlines;
		} else {
			res_blocks++;
		}
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			struct drm_plane *plane = pstate->plane;

			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
				      plane->base.id, plane->name,
				      res_blocks, ddb_allocation, res_lines);
			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}

static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     struct intel_plane *intel_plane,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane = &intel_plane->base;
	struct intel_plane_state *intel_pstate = NULL;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int ret;

	if (state)
		intel_pstate =
			intel_atomic_get_existing_plane_state(state,
							      intel_plane);

	/*
	 * Note: If we start supporting multiple pending atomic commits against
	 * the same planes/CRTC's in the future, plane->state will no longer be
	 * the correct pre-state to use for the calculations here and we'll
	 * need to change where we get the 'unchanged' plane data from.
	 *
	 * For now this is fine because we only allow one queued commit against
	 * a CRTC.  Even if the plane isn't modified by this transaction and we
	 * don't have a plane lock, we still have the CRTC's lock, so we know
	 * that no other transactions are racing with us to update it.
	 */
	if (!intel_pstate)
		intel_pstate = to_intel_plane_state(plane->state);

	WARN_ON(!intel_pstate->base.fb);

	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);

	ret = skl_compute_plane_wm(dev_priv,
				   cstate,
				   intel_pstate,
				   ddb_blocks,
				   level,
				   &result->plane_res_b,
				   &result->plane_res_l,
				   &result->plane_en);
	if (ret)
		return ret;

	return 0;
}

static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	uint32_t pixel_rate;
	uint32_t linetime_wm;

	if (!cstate->base.active)
		return 0;

	pixel_rate = cstate->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return 0;

	linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
				   1000, pixel_rate);

	/* Display WA #1135: bxt. */
	if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
		linetime_wm = DIV_ROUND_UP(linetime_wm, 2);

	return linetime_wm;
}

static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	trans_wm->plane_en = false;
}

static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct skl_plane_wm *wm;
	int level, max_level = ilk_wm_max_level(dev_priv);
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		wm = &pipe_wm->planes[intel_plane->id];

		for (level = 0; level <= max_level; level++) {
			ret = skl_compute_wm_level(dev_priv, ddb, cstate,
						   intel_plane, level,
						   &wm->wm[level]);
			if (ret)
				return ret;
		}
		skl_compute_transition_wm(cstate, &wm->trans_wm);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

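/*
 * Illustrative example of the encoding used above: a DDB entry with
 * start = 0x040 and end = 0x080 is written as ((0x080 - 1) << 16) | 0x040
 * = 0x007f0040, i.e. the register's high half holds the inclusive end
 * block while the low half holds the start block.
 */
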
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	uint32_t val = 0;

	if (level->plane_en) {
		val |= PLANE_WM_EN;
		val |= level->plane_res_b;
		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}

	I915_WRITE(reg, val);
}

static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
			       const struct skl_plane_wm *wm,
			       const struct skl_ddb_allocation *ddb,
			       enum plane_id plane_id)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
			    &ddb->plane[pipe][plane_id]);
	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
			    &ddb->y_plane[pipe][plane_id]);
}

static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
				const struct skl_plane_wm *wm,
				const struct skl_ddb_allocation *ddb)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
			    &ddb->plane[pipe][PLANE_CURSOR]);
}

bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	if (l1->plane_en != l2->plane_en)
		return false;

	/* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;

	return (l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b);
}

static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

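/*
 * The test above treats entries as half-open intervals [start, end):
 * for example (0, 100) and (100, 200) do not overlap (100 < 100 is
 * false), while (0, 100) and (99, 200) do (99 < 100 and 0 < 200).
 */
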
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
				 const struct skl_ddb_entry *ddb,
				 int ignore)
{
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (i != ignore && entries[i] &&
		    skl_ddb_entries_overlap(ddb, entries[i]))
			return true;

	return false;
}

static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}

static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_new_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}

static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_device *dev = state->dev;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	enum pipe pipe = intel_crtc->pipe;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
					&new_ddb->plane[pipe][plane_id]) &&
		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
					&new_ddb->y_plane[pipe][plane_id]))
			continue;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *intel_crtc;
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	uint32_t realloc_pipes = pipes_modified(state);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       state->acquire_ctx);
		if (ret)
			return ret;

		intel_state->active_pipe_changes = ~0;

		/*
		 * We usually only initialize intel_state->active_crtcs if
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!intel_state->modeset)
			intel_state->active_crtcs = dev_priv->active_crtcs;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceeds its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	/*
	 * We're not recomputing for the pipes not included in the commit, so
	 * make sure we start with the current state.
	 */
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);

		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
}

static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
		     struct skl_wm_values *src,
		     enum pipe pipe)
{
	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
	       sizeof(dst->ddb.y_plane[pipe]));
	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
	       sizeof(dst->ddb.plane[pipe]));
}

static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
	const struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	const struct intel_plane *intel_plane;
	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	int i;

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum pipe pipe = intel_crtc->pipe;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			enum plane_id plane_id = intel_plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_ddb->plane[pipe][plane_id];
			new = &new_ddb->plane[pipe][plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
					 intel_plane->base.base.id,
					 intel_plane->base.name,
					 old->start, old->end,
					 new->start, new->end);
		}
	}
}

static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_wm_values *results = &intel_state->wm_results;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation.  Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME:  Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);
		const struct skl_pipe_wm *old_pipe_wm =
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
					 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
	}

	skl_print_wm_changes(state);

	return 0;
}

static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc_state *cstate)
{
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
	enum plane_id plane_id;

	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (plane_id != PLANE_CURSOR)
			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
					   ddb, plane_id);
		else
			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
					    ddb);
	}
}

static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &state->wm_results;
	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);

	skl_copy_wm_for_pipe(hw_vals, results, pipe);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static inline void skl_wm_level_from_reg_val(uint32_t val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}

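/*
 * Example decode (hypothetical register value): val = PLANE_WM_EN |
 * (5 << PLANE_WM_LINES_SHIFT) | 200 yields plane_en = true,
 * plane_res_b = 200 blocks and plane_res_l = 5 lines - the inverse of
 * the encoding performed in skl_write_wm_level().
 */
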
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	uint32_t val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!intel_crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *cstate;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		cstate = to_intel_crtc_state(crtc->state);

		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);

		if (intel_crtc->active)
			hw->dirty_pipes |= drm_crtc_mask(crtc);
	}

	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}

#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)

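/*
 * For reference, _FW_WM(tmp, SR) expands to
 * ((tmp & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT), i.e. it extracts the
 * watermark field named by the second argument from a DSPFW register
 * value; the _VLV variant only differs in using the wider *_MASK_VLV.
 */
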
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV

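/*
 * Note on the DSPHOWM handling above: the DSPFW-style registers hold only
 * the low bits of each watermark (8 bits per plane, 9 for SR), so the
 * *_HI fields read from DSPHOWM are OR-ed in at bit 8 (bit 9 for SR) to
 * reassemble the full value.
 */
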
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct vlv_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0],
			      wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}

void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct vlv_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

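/*
 * Worked example of the normal watermark formula documented above
 * (illustrative numbers only): at a 100 MHz dotclock, 4 bytes per pixel
 * and 10 us latency, the FIFO must absorb 100e6 * 4 * 10e-6 = 4000 bytes
 * while waiting for memory; dividing by the FIFO entry size, rounding up
 * and adding the 2 extra entries for clock crossings gives the value to
 * program.
 */
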
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}

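/*
 * Example of the limits encoding above (hypothetical softlimits): on
 * pre-gen9 hardware with max_freq_softlimit = 0x10 and
 * min_freq_softlimit = 0x05, a request of val = 0x05 produces
 * (0x10 << 24) | (0x05 << 16) = 0x10050000, i.e. both up and down limits
 * set; any higher val leaves the down-limit field clear.
 */
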
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 &&
		    val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq &&
		    val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq &&
			 val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
		    val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		goto skip_hw_write;

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}

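/*
 * Example for the threshold programming above: in the BETWEEN bucket
 * (ei_up = 13000 us, threshold_up = 90) the up-threshold register gets
 * GT_INTERVAL_FROM_US(dev_priv, 13000 * 90 / 100), i.e. an 11700 us busy
 * window, expressed in the hardware's ~1280 ns granularity.
 */
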
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (err)
			return err;

		gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;
	int err;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	err = valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);

	if (err)
		DRM_ERROR("Failed to set RPS for idle\n");
}

void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		u8 freq;

		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Use the user's desired frequency as a guide, but for better
		 * performance, jump directly to RPe as our starting frequency.
		 */
		freq = max(dev_priv->rps.cur_freq,
			   dev_priv->rps.efficient_freq);

		if (intel_set_rps(dev_priv,
				  clamp(freq,
					dev_priv->rps.min_freq_softlimit,
					dev_priv->rps.max_freq_softlimit)))
			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}

void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->gt.awake &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}

int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	lockdep_assert_held(&dev_priv->rps.hw_lock);
	GEM_BUG_ON(val > dev_priv->rps.max_freq);
	GEM_BUG_ON(val < dev_priv->rps.min_freq);

	if (!dev_priv->rps.enabled) {
		dev_priv->rps.cur_freq = val;
		return 0;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = valleyview_set_rps(dev_priv, val);
	else
		err = gen6_set_rps(dev_priv, val);

	return err;
}

static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev_priv))
		DRM_DEBUG_DRIVER("Enabling RC6 states: "
				 "RC6 %s RC6p %s RC6pp %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
	else
		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}

static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
	u32 rc_ctl;
	int rc_sw_target;

	rc_ctl = I915_READ(GEN6_RC_CONTROL);
	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
		       RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					ggtt->stolen_reserved_size))) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}

int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev_priv)->gen < 6)
		return 0;

	if (!enable_rc6)
		return 0;

	if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		return 0;
	}

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev_priv))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
					 "(requested %d, valid %d)\n",
					 enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev_priv))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

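/*
 * Example of the mask handling above: with a module option of 7 on a
 * platform without RC6p (mask = INTEL_RC6_ENABLE = 1), the function
 * prints the adjustment and returns 7 & 1 = 1, so only plain RC6 is kept.
 */
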
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(dev_priv)) {
		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_GEN9_BC(dev_priv)) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(dev_priv,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status) == 0)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_GEN9_BC(dev_priv)) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}
}

static void reset_rps(struct drm_i915_private *dev_priv,
		      int (*set)(struct drm_i915_private *, u8))
{
	u8 freq = dev_priv->rps.cur_freq;

	/* force a reset */
	dev_priv->rps.power = -1;
	dev_priv->rps.cur_freq = -1;

	if (set(dev_priv, freq))
		DRM_ERROR("Failed to reset RPS to initial values\n");
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers.
	 */
	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */

	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
	if (IS_SKYLAKE(dev_priv))
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
				(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev_priv, rc6_mask);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6();
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev_priv)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev_priv, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	reset_rps(dev_priv, gen6_set_rps);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev_priv) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
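	/*
	 * mult_frac(x, 8, 3) rescales the DCLK field from 266.6MHz units to
	 * the 100MHz units used for ring_freq below: e.g. a field value of
	 * 6 (6 * 266.6MHz = 1600MHz) becomes 6 * 8 / 3 = 16 (16 * 100MHz).
	 */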
	if (IS_GEN9_BC(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
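	/*
	 * Illustrative numbers only: with max_ia_freq = 3000 MHz and
	 * scaling_factor = 180, a GPU bin 4 steps below maximum would get
	 * ia_freq = 3000 - (4 * 180) / 2 = 2640 MHz, encoded as the ratio
	 * DIV_ROUND_CLOSEST(2640, 100) = 26 in the pcode table entry.
	 */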
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_GEN9_BC(dev_priv)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev_priv)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
	rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
	       FB_GFX_FREQ_FUSE_MASK);

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));
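		/*
		 * The 32KiB power context is carved from the very top of
		 * stolen memory and rounded down to a 4KiB boundary; the
		 * low bits of PCBR are not part of the address.
		 */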
		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	i915_gem_object_put(dev_priv->vlv_pctx);
	dev_priv->vlv_pctx = NULL;
}

static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);
}

static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");
}

static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev_priv, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);
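	/*
	 * The fields encode a divider around a 133.333MHz (133333 kHz)
	 * reference: e.g. div = 30, post = 1, pre = 1 gives
	 * 30 * 133333 / (2 * 1) ~= 2000000, i.e. ~2GHz if the result is
	 * read as kHz.
	 */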
	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
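/*
 * Rows are keyed by (i, t): __i915_chipset_val() below matches them
 * against ips.c_m and ips.r_t, then uses m and c as the slope and
 * intercept of its counts-to-power fit.
 */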
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	lockdep_assert_held(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;
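	/*
	 * Linear calibration: slope m (scaled by 127) from TSFS, raw
	 * thermal reading x from TR1, intercept b. __i915_gfx_val() treats
	 * the result as a temperature-like value (compared against 50 and
	 * 80 below).
	 */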
	return ((m * x) / 127) - b;
}
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}
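/*
 * E.g. pxvid 7 maps to (7 + 2) * 125 = 1125, the baseline that
 * pvid_to_extvid() subtracts below; codes 8..30 are clamped up to 31.
 */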
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}

static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (i915_mch_dev)
		ret = i915_mch_dev->gt.awake;
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
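		/*
		 * The normalization maps the maximum operating point to the
		 * top of the 8-bit range: vid = 127 at freq = 900MHz gives
		 * 127 * 127 * 900 * 255 / (127 * 127 * 900) = 255.
		 */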
6897 DRM_ERROR("bad pxval: %ld\n", val
);
6900 /* Render standby states get 0 weight */
6904 for (i
= 0; i
< 4; i
++) {
6905 u32 val
= (pxw
[i
*4] << 24) | (pxw
[(i
*4)+1] << 16) |
6906 (pxw
[(i
*4)+2] << 8) | (pxw
[(i
*4)+3]);
6907 I915_WRITE(PXW(i
), val
);
6910 /* Adjust magic regs to magic values (more experimental results) */
6911 I915_WRITE(OGW0
, 0);
6912 I915_WRITE(OGW1
, 0);
6913 I915_WRITE(EG0
, 0x00007f00);
6914 I915_WRITE(EG1
, 0x0000000e);
6915 I915_WRITE(EG2
, 0x000e0000);
6916 I915_WRITE(EG3
, 0x68000300);
6917 I915_WRITE(EG4
, 0x42000000);
6918 I915_WRITE(EG5
, 0x00140031);
6922 for (i
= 0; i
< 8; i
++)
6923 I915_WRITE(PXWL(i
), 0);
6925 /* Enable PMON + select events */
6926 I915_WRITE(ECR
, 0x80000019);
6928 lcfuse
= I915_READ(LCFUSE02
);
6930 dev_priv
->ips
.corr
= (lcfuse
& LCFUSE_HIV_MASK
);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;

	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dev_priv->rps.min_freq_softlimit =
			max_t(int,
			      dev_priv->rps.efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (dev_priv->rps.max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			dev_priv->rps.max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_autoenable_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}

void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.enabled = true; /* force disabling */
	intel_disable_gt_powersave(dev_priv);

	gen6_reset_rps_interrupts(dev_priv);
}

void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.enabled))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_disable_rc6(dev_priv);
		gen9_disable_rps(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_disable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_disable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_disable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	}

	dev_priv->rps.enabled = false;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* We shouldn't be disabling as we submit, so this should be less
	 * racy than it appears!
	 */
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_GEN9_BC(dev_priv))
			gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	if (READ_ONCE(dev_priv->rps.enabled))
		goto out;

	rcs = dev_priv->engine[RCS];
	if (rcs->last_retired_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

out:
	intel_runtime_pm_put(dev_priv);
}

void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev_priv);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 *  3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
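/*
 * WaProgramL3SqcReg1Default: program the L3 SQC credit split. Broadwell
 * uses 30 general / 2 high-priority credits and Cherryview 38 / 2; see
 * the callers below.
 */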
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	ilk_init_lp_watermarks(dev_priv);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	ilk_init_lp_watermarks(dev_priv);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev_priv);
}
static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

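/*
 * Extending support is a matter of adding one more branch to the ladder
 * above; e.g. (hypothetical platform and hook names):
 *
 *	else if (IS_NEWPLATFORM(dev_priv))
 *		dev_priv->display.init_clock_gating = newplatform_init_clock_gating;
 *
 * Any device matching none of the branches falls back to
 * nop_init_clock_gating(), with MISSING_CASE() flagging the gap so it is
 * noticed during bring-up instead of silently skipping workarounds.
 */
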
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

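/*
 * Note on the HAS_PCH_SPLIT() branch above: the ILK watermark hooks are
 * only installed when the relevant latency values were read back non-zero
 * (index 1 on GEN5, index 0 otherwise); with unusable latencies the hooks
 * stay NULL and CxSR is left disabled, as the debug message indicates.
 */
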
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

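/*
 * Both decoders read the same GEN6_PCODE_ERROR_MASK bits but interpret
 * them differently per generation; sandybridge_pcode_read()/write() below
 * pick the decoder based on INTEL_GEN(dev_priv) > 6.
 */
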
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

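/*
 * Both pcode helpers above implement the same mailbox handshake: check
 * that GEN6_PCODE_READY is clear (no request already in flight), write
 * the payload to GEN6_PCODE_DATA/DATA1, write GEN6_PCODE_READY together
 * with the mailbox command, wait for the ready bit to clear again, then
 * decode the error bits via the gen6/gen7 status helpers. Callers must
 * hold rps.hw_lock, which is what serializes access to the mailbox.
 */
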
static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if this times out, the poll is retried for another
 * 50 ms with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}

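/*
 * Illustrative call sequence only (mailbox/command values are made up):
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = skl_pcode_request(dev_priv, HYPOTHETICAL_MBOX,
 *				HYPOTHETICAL_REQUEST,
 *				HYPOTHETICAL_ACK_MASK, HYPOTHETICAL_ACK, 3);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * A timeout_base_ms of 3 keeps the atomic retry path within the
 * WARN_ON_ONCE(timeout_base_ms > 3) bound above.
 */
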
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

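/*
 * Worked example with a made-up gpll_ref_freq of 20000: opcode 0xc5 gives
 * N = 0xc5 - 0xb7 = 14, so byt_gpu_freq() returns 20000 * 14 / 1000 = 280,
 * and byt_freq_opcode(280) rounds back to 1000 * 280 / 20000 + 0xb7 = 0xc5.
 */
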
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

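/*
 * Worked example with the same made-up gpll_ref_freq of 20000: an even
 * opcode of 28 gives chv_gpu_freq() = 20000 * 28 / 4000 = 140, and
 * chv_freq_opcode(140) = round(2000 * 140 / 20000) * 2 = 14 * 2 = 28;
 * the final "* 2" guarantees the even opcode the hardware requires.
 */
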
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

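/*
 * intel_freq_opcode() is the inverse mapping of intel_gpu_freq(). Assuming
 * the driver's usual GT_FREQUENCY_MULTIPLIER of 50 and GEN9_FREQ_SCALER of
 * 3, a SNB..BDW opcode of 18 decodes to 18 * 50 = 900 MHz, while 900 MHz
 * on gen9 encodes to 900 * 3 / 50 = 54, i.e. gen9 opcodes are in
 * 50/3 ~= 16.67 MHz units.
 */
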
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

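/*
 * Note on lifetimes: the boost holds its own reference on the request
 * (i915_gem_request_get() above), so the request cannot be freed before
 * __intel_rps_boost_work() runs; the worker drops that reference with
 * i915_gem_request_put() and frees the boost itself. The GFP_ATOMIC
 * allocation keeps the helper safe to call from atomic context.
 */
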
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}

static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/* The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);

	/* vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);

	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * now.
	 */

	spin_unlock_irq(&dev_priv->uncore.lock);

	return lower | (u64)upper << 8;
}

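/*
 * Worked example of the stitching above: for a 40 bit counter value of
 * 0x12345678ab, the upper window (bits 39:8) reads 0x12345678 and the
 * lower window (bits 31:0) reads 0x345678ab. The windows overlap in bits
 * 31:8, so once the retry loop sees a stable upper value,
 * lower | (u64)upper << 8 reassembles 0x12345678ab exactly.
 */
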
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	u64 time_hw, units, div;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1000;
		div = dev_priv->czclk_freq;

		time_hw = vlv_residency_raw(dev_priv, reg);
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1000;
		div = 1200;		/* 833.33ns */

		time_hw = I915_READ(reg);
	} else {
		units = 128000; /* 1.28us */
		div = 100000;

		time_hw = I915_READ(reg);
	}

	intel_runtime_pm_put(dev_priv);
	return DIV_ROUND_UP_ULL(time_hw * units, div);
}

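/*
 * Worked example of the unit handling: on the default (non-VLV/CHV,
 * non-GEN9-LP) path the counter ticks every 1.28us, expressed as
 * units = 128000 over div = 100000. A raw reading of 1000000 ticks thus
 * converts to DIV_ROUND_UP_ULL(1000000 * 128000, 100000) = 1280000us of
 * RC6 residency.
 */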