/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA#0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}

	/* Display WA #1133: WaFbcSkipSegments:glk */
	val = I915_READ(ILK_DPFC_CHICKEN);
	val &= ~GLK_SKIP_SEG_COUNT_MASK;
	val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
	I915_WRITE(ILK_DPFC_CHICKEN, val);
}

static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
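
/*
 * Example (macro expansion only, nothing new): FW_WM(wm, SR) becomes
 * ((wm << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark is shifted
 * into the SR field of a DSPFW register and clamped by the field mask so
 * it can't spill into neighbouring fields.
 */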

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self-refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
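
/*
 * Illustrative usage sketch (not code from this file): per the kernel-doc
 * above, a caller that needs plane register writes to be latched reliably
 * would disallow CxSR first and wait a vblank before touching the planes:
 *
 *	if (intel_set_memory_cxsr(dev_priv, false))
 *		intel_wait_for_vblank(dev_priv, pipe);
 *	(update the plane registers here)
 *	intel_set_memory_cxsr(dev_priv, true);
 */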

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
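
/*
 * For example, for sprite0 on pipe A the FIFO start is a 9-bit value whose
 * low 8 bits live in DSPARB and whose 9th bit lives in DSPARB2, so
 * VLV_FIFO_START(dsparb, dsparb2, 0, 0) reassembles it as
 * (dsparb & 0xff) | ((dsparb2 & 0x1) << 8).
 */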

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
	uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *  |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	uint64_t ret;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
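
/*
 * Worked example with illustrative numbers (not taken from Bspec): a
 * 148500 kHz pixel rate, 4 bytes per pixel and a 5 usec wakeup latency
 * (latency == 50 in 0.1us units) gives
 *
 *	ret = 148500 * 4 * 50 / 10000 = 2970 bytes
 *
 * i.e. the FIFO must buffer roughly 2970 bytes to ride out the wakeup.
 */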

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
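
/*
 * Worked example with illustrative numbers (not taken from Bspec): pixel
 * rate 148500 kHz, htotal 2200, a 1920 pixel wide 4 bpp plane and a
 * 12 usec latency (latency == 120):
 *
 *	(120 * 148500) / (2200 * 10000) = 0 whole lines elapse, so
 *	ret = (0 + 1) * 1920 * 4 = 7680 bytes
 *
 * i.e. one full line of data covers the latency once blanking is factored
 * into the average drain rate.
 */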

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
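
/*
 * Worked example with illustrative numbers (the FIFO/guard values are made
 * up; the real ones come from the intel_watermark_params tables above):
 * with pixel_rate = 100000, cpp = 4, latency_ns = 5000, a 64 byte
 * cacheline, guard_size = 2 and a 512 entry FIFO:
 *
 *	intel_wm_method1(100000, 4, 50) = 2000 bytes
 *	entries = DIV_ROUND_UP(2000, 64) + 2 = 34
 *	wm_size = 512 - 34 = 478
 *
 * which is then subject to the max_wm/default_wm/minimum clamping above.
 */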

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;

	return plane_state->base.visible;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
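
/*
 * Worked example (illustrative numbers): a 511 cacheline FIFO holds
 * 511 * 64 = 32704 bytes. For a 256 pixel wide 4 bpp plane, 8 whole lines
 * are 256 * 4 * 8 = 8192 bytes, so the watermark gets bumped by
 * 32704 - 8192 = 24512 bytes. For a 1920 pixel wide plane 8 lines no
 * longer fit (61440 bytes) and the adjustment is 0.
 */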

static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state,
			       int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;
	int latency = dev_priv->wm.pri_latency[level] * 10;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = 4;
	else
		cpp = plane_state->base.fb->format->cpp[0];

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	if (plane->id == PLANE_CURSOR)
		width = plane_state->base.crtc_w;
	else
		width = drm_rect_width(&plane_state->base.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(int, wm, USHRT_MAX);
}

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
	if (dirty) {
		DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			      plane->base.name,
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. IF
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}

static int g4x_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
	const struct g4x_wm_state *active = &crtc->wm.active.g4x;
	enum plane_id plane_id;

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en && intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}

static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_crtcs++;
	}

	if (num_active_crtcs != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}

static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}

static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
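
/*
 * In other words the workaround is only needed when sprite1 is enabled
 * without sprite0: active_planes == BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE1)
 * returns true, while any mask with PLANE_SPRITE0 set returns false.
 */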

static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
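
/*
 * Worked example (illustrative numbers): with only the primary and sprite0
 * active and PM2 raw watermarks of 100 and 50:
 *
 *	total_rate = 150
 *	primary = 511 * 100 / 150 = 340, sprite0 = 511 * 50 / 150 = 170
 *	fifo_left = 1, which the remainder loop hands to the primary,
 *
 * giving a final 341/170/0 split of the 511 entry FIFO (plus the fixed
 * 63 entry cursor FIFO).
 */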

/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
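
/*
 * E.g. vlv_invert_wm_value(40, 96) == 56: the raw watermarks count FIFO
 * entries needed, while the registers are programmed with the inverted
 * value relative to the plane's FIFO allocation; a watermark that doesn't
 * fit yields USHRT_MAX so the level gets treated as invalid.
 */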

/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}

static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
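/*
 * Worked example (illustrative numbers, not from bspec): with a FIFO split
 * of PRIMARY=256, SPRITE0=128, SPRITE1=127 and CURSOR=63 entries, the
 * function below computes sprite0_start = 256, sprite1_start = 384 and
 * fifo_size = 511, satisfying its WARN_ON checks. The low eight bits of
 * each start point go into the DSPARB SPRITEx fields and bit eight into
 * the corresponding _HI fields.
 */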
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&dev_priv->uncore.lock);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ_FW(DSPARB3);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB3, dsparb3);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	POSTING_READ_FW(DSPARB);

	spin_unlock(&dev_priv->uncore.lock);
}

#undef VLV_FIFO
static int vlv_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
	const struct vlv_wm_state *active = &crtc->wm.active.vlv;
	int level;

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = dev_priv->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
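/*
 * Note (added for clarity): the merge above picks the deepest power-saving
 * level that every active CRTC can support, and allows maxfifo/cxsr only
 * when exactly one CRTC is active; with multiple pipes the level is
 * clamped back to PM2.
 */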
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
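/*
 * Note (added for clarity): watermarks are programmed in two phases around
 * the vblank: vlv_initial_watermarks() applies the "intermediate" values
 * that are safe for both the old and new plane configuration, and
 * vlv_optimize_watermarks() applies the final "optimal" values afterwards,
 * but only when need_postvbl_update says they differ.
 */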
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(clock, htotal,
					   hdisplay, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = intel_wm_method2(clock, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
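/*
 * Worked example (illustrative numbers): for a 1920x1080 mode with
 * crtc_clock = 148500 kHz, htotal = 2200 and cpp = 4, the 12 us
 * self-refresh latency above is shorter than one line time (~14.8 us),
 * so method2 charges one full line: 1 * 1920 * 4 = 7680 bytes = 120 FIFO
 * lines of 64 bytes, leaving srwm = 512 - 120 = 392.
 */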
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method1(pixel_rate, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
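/*
 * Worked example (illustrative numbers): pri_val = 120, horiz_pixels = 1920,
 * cpp = 4 gives DIV_ROUND_UP(120 * 64, 1920 * 4) + 2 = 1 + 2 = 3, i.e. the
 * FBC watermark grows with the ratio of the primary watermark (in 64-byte
 * units) to the bytes in one uncompressed line.
 */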
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	uint32_t method1, method2;
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_method2(cstate->pixel_rate,
			      cstate->base.adjusted_mode.crtc_htotal,
			      pstate->base.crtc_w, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
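/*
 * Worked example (illustrative numbers): on IVB (fifo_size = 768) with a
 * single active pipe, sprites enabled and 5:6 partitioning at level > 0,
 * the sprite plane may use 768 * 5 / 6 = 640 entries and the primary
 * 768 / 6 = 128, before clamping to the register maximum.
 */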
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}

static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
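/*
 * Worked example (illustrative numbers): a WM1+ latency field read as 4
 * means 4 * 0.5 us = 2 us; the "* 5" above converts it into the 0.1 us
 * units the ilk_compute_*_wm() helpers expect, i.e. 20.
 */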
static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
		return 0;

	/*
	 * The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk.logical.cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
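/*
 * Worked example (illustrative numbers): htotal = 2200 and
 * crtc_clock = 148500 kHz give DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500)
 * = 119, i.e. a line time of 119 / 8 = 14.875 us in the register's
 * 0.125 us units.
 */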
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  uint16_t wm[8])
{
	if (INTEL_GEN(dev_priv) >= 9) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl+,glk
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from the
		 * punit when level 0 response data is 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	} else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
	}
}
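/*
 * Note (added for clarity): each GEN9_PCODE_READ_MEM_LATENCY response
 * packs four 8-bit latency values into one 32-bit word; data0 = 0 selects
 * levels 0-3 and data0 = 1 selects levels 4-7, which is why two mailbox
 * round trips are needed above.
 */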
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev_priv))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev_priv))
		snb_wm_latency_quirk(dev_priv);
}

static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->base.visible;
		pipe_wm->sprites_scaled = sprstate->base.visible &&
			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(to_i915(dev));

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
		newstate->wm.need_postvbl_update = true;

	return 0;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
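/*
 * Worked example: on HSW (five levels, 0-4), if wm[4] is enabled then
 * LP1/LP2/LP3 map to levels 1/3/4, otherwise to levels 1/2/3 - exactly
 * the "either 1,2,3 or 1,3,4" cases mentioned above.
 */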
static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(to_i915(dev));
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
/*
 * FIXME: We still don't have the proper code to detect if we need to apply
 * the WA, so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
		return true;

	return false;
}

static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
	    IS_CANNONLAKE(dev_priv))
		return true;

	if (IS_SKYLAKE(dev_priv) &&
	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
		return true;

	return false;
}
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	DRM_DEBUG_KMS("Enabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for the SAGV when enabling */
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}

int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	DRM_DEBUG_KMS("Disabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}
bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *cstate;
	enum pipe pipe;
	int level, latency;
	int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20;

	if (!intel_has_sagv(dev_priv))
		return false;

	/*
	 * SKL+ workaround: bspec recommends we disable the SAGV when we have
	 * more than one pipe enabled
	 *
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight32(intel_state->active_crtcs) == 0)
		return true;
	else if (hweight32(intel_state->active_crtcs) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(intel_state->active_crtcs) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	cstate = to_intel_crtc_state(crtc->base.state);

	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&cstate->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
		     { }

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(intel_state) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If any of the planes on this pipe don't enable wm levels that
		 * incur memory latencies higher than sagv_block_time_us we
		 * can't enable the SAGV.
		 */
		if (latency < sagv_block_time_us)
			return false;
	}

	return true;
}
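/*
 * Note (added for clarity): in effect the SAGV may stay enabled only if
 * the single active pipe is non-interlaced and every enabled plane has a
 * watermark level whose latency is at least the SAGV block time (30 us on
 * gen9, 20 us on gen10), including the +15 us X-tiled penalty when the
 * memory bandwidth workaround applies.
 */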
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
	WARN_ON(ddb_size == 0);

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged. Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}

static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}
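/*
 * Worked example (illustrative numbers, assuming a SKL-like 896 block DDB):
 * after reserving 4 blocks for the bypass path, two active pipes split the
 * remaining 892 blocks evenly, so the second pipe gets alloc->start = 446
 * and alloc->end = 892.
 */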
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	struct intel_crtc *crtc;

	memset(ddb, 0, sizeof(*ddb));

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum intel_display_power_domain power_domain;
		enum plane_id plane_id;
		enum pipe pipe = crtc->pipe;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			u32 val;

			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
			else
				val = I915_READ(CUR_BUF_CFG(pipe));

			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
		}

		intel_display_power_put(dev_priv, power_domain);
	}
}
/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
			   const struct intel_plane_state *pstate)
{
	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
	uint32_t src_w, src_h, dst_w, dst_h;
	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return u32_to_fixed16(0);

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	if (plane->id == PLANE_CURSOR) {
		/*
		 * Cursors only support 0/180 degree rotation,
		 * hence no need to account for rotation here.
		 */
		src_w = pstate->base.src_w >> 16;
		src_h = pstate->base.src_h >> 16;
		dst_w = pstate->base.crtc_w;
		dst_h = pstate->base.crtc_h;
	} else {
		/*
		 * Src coordinates are already rotated by 270 degrees for
		 * the 90/270 degree plane rotation cases (to match the
		 * GTT mapping), hence no need to account for rotation here.
		 */
		src_w = drm_rect_width(&pstate->base.src) >> 16;
		src_h = drm_rect_height(&pstate->base.src) >> 16;
		dst_w = drm_rect_width(&pstate->base.dst);
		dst_h = drm_rect_height(&pstate->base.dst);
	}

	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}

static uint_fixed_16_16_t
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
{
	uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);

	if (!crtc_state->base.enable)
		return pipe_downscale;

	if (crtc_state->pch_pfit.enabled) {
		uint32_t src_w, src_h, dst_w, dst_h;
		uint32_t pfit_size = crtc_state->pch_pfit.size;
		uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
		uint_fixed_16_16_t downscale_h, downscale_w;

		src_w = crtc_state->pipe_src_w;
		src_h = crtc_state->pipe_src_h;
		dst_w = pfit_size >> 16;
		dst_h = pfit_size & 0xffff;

		if (!dst_w || !dst_h)
			return pipe_downscale;

		fp_w_ratio = div_fixed16(src_w, dst_w);
		fp_h_ratio = div_fixed16(src_h, dst_h);
		downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
		downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

		pipe_downscale = mul_fixed16(downscale_w, downscale_h);
	}

	return pipe_downscale;
}
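/*
 * Worked example: scaling a 3840x2160 source down to a 1920x1080
 * destination gives a 2.0 ratio (0x20000 in 16.16 fixed point) in each
 * direction, so the total downscale amount is 4.0 (0x40000).
 */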
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *cstate)
{
	struct drm_crtc_state *crtc_state = &cstate->base;
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	struct intel_plane_state *intel_pstate;
	int crtc_clock, dotclk;
	uint32_t pipe_max_pixel_rate;
	uint_fixed_16_16_t pipe_downscale;
	uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);

	if (!cstate->base.enable)
		return 0;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		uint_fixed_16_16_t plane_downscale;
		uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
		int bpp;

		if (!intel_wm_plane_visible(cstate,
					    to_intel_plane_state(pstate)))
			continue;

		if (WARN_ON(!pstate->fb))
			return -EINVAL;

		intel_pstate = to_intel_plane_state(pstate);
		plane_downscale = skl_plane_downscale_amount(cstate,
							     intel_pstate);
		bpp = pstate->fb->format->cpp[0] * 8;
		if (bpp == 64)
			plane_downscale = mul_fixed16(plane_downscale,
						      fp_9_div_8);

		max_downscale = max_fixed16(plane_downscale, max_downscale);
	}
	pipe_downscale = skl_pipe_downscale_amount(cstate);

	pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);

	crtc_clock = crtc_state->adjusted_mode.crtc_clock;
	dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;

	if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
		dotclk *= 2;

	pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);

	if (pipe_max_pixel_rate < crtc_clock) {
		DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
		return -EINVAL;
	}

	return 0;
}
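/*
 * Worked example of the check above, with assumed illustrative numbers:
 * for a logical cdclk of 540000 kHz and a combined pipe downscale of
 * 2.0, pipe_max_pixel_rate = 540000 / 2.0 = 270000 kHz. A mode with
 * crtc_clock = 297000 kHz would then exceed the limit and the atomic
 * check fails with -EINVAL.
 */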
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane *plane = to_intel_plane(pstate->plane);
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t data_rate;
	uint32_t width = 0, height = 0;
	struct drm_framebuffer *fb;
	u32 format;
	uint_fixed_16_16_t down_scale_amount;

	if (!intel_pstate->base.visible)
		return 0;

	fb = pstate->fb;
	format = fb->format->format;

	if (plane->id == PLANE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)	/* y-plane data rate */
			data_rate = width * height *
				fb->format->cpp[0];
		else	/* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				fb->format->cpp[1];
	} else {
		/* for packed formats */
		data_rate = width * height * fb->format->cpp[0];
	}

	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);

	return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
}
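/*
 * Worked example of the planar split above (illustrative numbers): an
 * unscaled 3840x2160 NV12 plane has cpp[0] = 1 and cpp[1] = 2, so the
 * y-plane rate is 3840 * 2160 * 1 = 8294400 and the uv-plane rate is
 * (3840/2) * (2160/2) * 2 = 4147200 relative units; each is then scaled
 * by the plane's downscale amount.
 */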
/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
				 unsigned *plane_data_rate,
				 unsigned *plane_y_data_rate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	unsigned int total_data_rate = 0;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
		enum plane_id plane_id = to_intel_plane(plane)->id;
		unsigned int rate;

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		plane_data_rate[plane_id] = rate;

		total_data_rate += rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		plane_y_data_rate[plane_id] = rate;

		total_data_rate += rate;
	}

	return total_data_rate;
}
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate, int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->format->format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
		return 8;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

	/* Halve UV plane width and height for NV12 */
	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->format->format == DRM_FORMAT_NV12 && !y)
		plane_bpp = fb->format->cpp[1];
	else
		plane_bpp = fb->format->cpp[0];

	if (drm_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines / 4 + 3;
}
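/*
 * Worked example for the minimum allocation formula above (assumed,
 * illustrative numbers): a 1920-wide, 32bpp Y-tiled plane without
 * rotation uses min_scanlines = 8, so
 *   DIV_ROUND_UP(4 * 1920 * 4, 512) = 60 blocks per 4 scanlines,
 *   60 * 8 / 4 + 3 = 123 blocks minimum.
 */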
static void
skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
		 uint16_t *minimum, uint16_t *y_minimum)
{
	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (plane_id == PLANE_CURSOR)
			continue;

		if (!pstate->visible)
			continue;

		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
	}

	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
}
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
	uint16_t alloc_size, start;
	uint16_t minimum[I915_MAX_PLANES] = {};
	uint16_t y_minimum[I915_MAX_PLANES] = {};
	unsigned int total_data_rate;
	enum plane_id plane_id;
	int num_active;
	unsigned plane_data_rate[I915_MAX_PLANES] = {};
	unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
	uint16_t total_min_blocks = 0;

	/* Clear the partitioning for disabled planes. */
	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;

	skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);

	/*
	 * 1. Allocate the minimum required blocks for each active plane
	 * and allocate the cursor, it doesn't require extra allocation
	 * proportional to the data rate.
	 */

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		total_min_blocks += minimum[plane_id];
		total_min_blocks += y_minimum[plane_id];
	}

	if (total_min_blocks > alloc_size) {
		DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
		DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
			      alloc_size);
		return -EINVAL;
	}

	alloc_size -= total_min_blocks;
	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate,
							   plane_data_rate,
							   plane_y_data_rate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;

		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate = plane_data_rate[plane_id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[plane_id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][plane_id].start = start;
			ddb->plane[pipe][plane_id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		y_data_rate = plane_y_data_rate[plane_id];

		y_plane_blocks = y_minimum[plane_id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][plane_id].start = start;
			ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}
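/*
 * Worked example of the proportional split in step 2 (assumed numbers):
 * with 200 blocks left after the minima and two planes whose relative
 * data rates are 300 and 100 (total 400), plane A receives its minimum
 * plus 200 * 300 / 400 = 150 extra blocks and plane B its minimum plus
 * 200 * 100 / 400 = 50 extra blocks.
 */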
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
	       uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * 512);

	if (INTEL_GEN(dev_priv) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}
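/*
 * Worked method-1 example (illustrative values): for latency = 5 us,
 * pixel_rate = 148500 kHz and cpp = 4,
 *   wm_intermediate_val = 5 * 148500 * 4 = 2970000
 *   result = 2970000 / (1000 * 512) ~= 5.8 blocks (in 16.16 form),
 * i.e. the number of 512-byte blocks fetched during the latency window.
 */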
static uint_fixed_16_16_t
skl_wm_method2(uint32_t pixel_rate,
	       uint32_t pipe_htotal,
	       uint32_t latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}
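/*
 * Worked method-2 example (illustrative values): latency = 5 us,
 * pixel_rate = 148500 kHz, pipe_htotal = 2200 gives
 *   DIV_ROUND_UP(5 * 148500, 2200 * 1000) = 1 scanline elapsed,
 * and with plane_blocks_per_line = 15.0 the watermark is 15 blocks.
 * Method 2 counts whole lines, so it dominates for large latencies.
 */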
static uint_fixed_16_16_t
intel_get_linetime_us(struct intel_crtc_state *cstate)
{
	uint32_t pixel_rate;
	uint32_t crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!cstate->base.active)
		return u32_to_fixed16(0);

	pixel_rate = cstate->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}
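/*
 * Illustrative linetime computation (assumed mode values): with
 * crtc_htotal = 2200 and pixel_rate = 148500 kHz,
 *   linetime_us = 2200 * 1000 / 148500 ~= 14.81 us per scanline.
 */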
static uint32_t
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
			      const struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint_fixed_16_16_t downscale_amount;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = cstate->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(cstate, pstate);

	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
					downscale_amount);
}
static int
skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
			    struct intel_crtc_state *cstate,
			    const struct intel_plane_state *intel_pstate,
			    struct skl_wm_params *wp)
{
	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
	const struct drm_plane_state *pstate = &intel_pstate->base;
	const struct drm_framebuffer *fb = pstate->fb;
	uint32_t interm_pbpl;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);

	if (!intel_wm_plane_visible(cstate, intel_pstate))
		return 0;

	wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		      fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
		      fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		      fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
	wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
			 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;

	if (plane->id == PLANE_CURSOR) {
		wp->width = intel_pstate->base.crtc_w;
	} else {
		/*
		 * Src coordinates are already rotated by 270 degrees for
		 * the 90/270 degree plane rotation cases (to match the
		 * GTT mapping), hence no need to account for rotation here.
		 */
		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
	}

	wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
							    fb->format->cpp[0];
	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
							     intel_pstate);

	if (drm_rotation_90_or_270(pstate->rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (apply_memory_bw_wa)
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines, 512);

		if (INTEL_GEN(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else if (wp->x_tiled && IS_GEN9(dev_priv)) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512);
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1;
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);
	wp->linetime_us = fixed16_to_u32_round_up(
					intel_get_linetime_us(cstate));

	return 0;
}
static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				const struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				const struct skl_wm_params *wp,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	const struct drm_plane_state *pstate = &intel_pstate->base;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	uint32_t res_blocks, res_lines;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);

	if (latency == 0 ||
	    !intel_wm_plane_visible(cstate, intel_pstate)) {
		*enabled = false;
		return 0;
	}

	/* Display WA #1141: kbl,cfl */
	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
	    IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
	    dev_priv->ipc_enabled)
		latency += 4;

	if (apply_memory_bw_wa && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
				 wp->cpp, latency);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
		     512 < 1) && (wp->plane_bytes_per_line / 512 < 1))
			selected_result = method2;
		else if (ddb_allocation >=
			 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
			selected_result = min_fixed16(method1, method2);
		else if (latency >= wp->linetime_us)
			selected_result = min_fixed16(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
	res_lines = div_round_up_fixed16(selected_result,
					 wp->plane_blocks_per_line);

	/* Display WA #1125: skl,bxt,kbl,glk */
	if (level == 0 && wp->rc_surface)
		res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);

	/* Display WA #1126: skl,bxt,kbl,glk */
	if (level >= 1 && level <= 7) {
		if (wp->y_tiled) {
			res_blocks += fixed16_to_u32_round_up(
							wp->y_tile_minimum);
			res_lines += wp->y_min_scanlines;
		} else {
			res_blocks++;
		}
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			struct drm_plane *plane = pstate->plane;

			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
				      plane->base.id, plane->name,
				      res_blocks, ddb_allocation, res_lines);
			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}
static int
skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
		      struct skl_ddb_allocation *ddb,
		      struct intel_crtc_state *cstate,
		      const struct intel_plane_state *intel_pstate,
		      const struct skl_wm_params *wm_params,
		      struct skl_plane_wm *wm)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane = intel_pstate->base.plane;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int level, max_level = ilk_wm_max_level(dev_priv);
	int ret;

	if (WARN_ON(!intel_pstate->base.fb))
		return -EINVAL;

	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = &wm->wm[level];

		ret = skl_compute_plane_wm(dev_priv,
					   cstate,
					   intel_pstate,
					   ddb_blocks,
					   level,
					   wm_params,
					   &result->plane_res_b,
					   &result->plane_res_l,
					   &result->plane_en);
		if (ret)
			return ret;
	}

	return 0;
}
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	uint_fixed_16_16_t linetime_us;
	uint32_t linetime_wm;

	linetime_us = intel_get_linetime_us(cstate);

	if (is_fixed16_zero(linetime_us))
		return 0;

	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));

	/* Display WA #1135: bxt:ALL GLK:ALL */
	if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
	    dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return linetime_wm;
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_params *wp,
				      struct skl_wm_level *wm_l0,
				      uint16_t ddb_allocation,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	uint16_t trans_min, trans_y_tile_min;
	const uint16_t trans_amount = 10; /* This is configurable amount */
	uint16_t trans_offset_b, res_blocks;

	if (!cstate->base.active)
		goto exit;

	/* Transition WM are not recommended by HW team for GEN9 */
	if (INTEL_GEN(dev_priv) <= 9)
		goto exit;

	/* Transition WM don't make any sense if ipc is disabled */
	if (!dev_priv->ipc_enabled)
		goto exit;

	trans_min = 0;
	if (INTEL_GEN(dev_priv) >= 10)
		trans_min = 4;

	trans_offset_b = trans_min + trans_amount;

	if (wp->y_tiled) {
		trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
							wp->y_tile_minimum);
		res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
				trans_offset_b;
	} else {
		res_blocks = wm_l0->plane_res_b + trans_offset_b;

		/* WA BUG:1938466 add one block for non y-tile planes */
		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
			res_blocks += 1;
	}

	res_blocks += 1;

	if (res_blocks < ddb_allocation) {
		trans_wm->plane_res_b = res_blocks;
		trans_wm->plane_en = true;
		return;
	}

exit:
	trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	struct drm_crtc_state *crtc_state = &cstate->base;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	struct skl_plane_wm *wm;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		const struct intel_plane_state *intel_pstate =
						to_intel_plane_state(pstate);
		enum plane_id plane_id = to_intel_plane(plane)->id;
		struct skl_wm_params wm_params;
		enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
		uint16_t ddb_blocks;

		wm = &pipe_wm->planes[plane_id];
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
		memset(&wm_params, 0, sizeof(struct skl_wm_params));

		ret = skl_compute_plane_wm_params(dev_priv, cstate,
						  intel_pstate, &wm_params);
		if (ret)
			return ret;

		ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
					    intel_pstate, &wm_params, wm);
		if (ret)
			return ret;

		skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
					  ddb_blocks, &wm->trans_wm);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}
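/*
 * Illustrative register encoding (assumed entry values): a DDB entry
 * covering blocks [100, 228) is written as
 *   (228 - 1) << 16 | 100 = 0x00e30064
 * i.e. the inclusive end block in the high half, start in the low half.
 */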
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	uint32_t val = 0;

	if (level->plane_en) {
		val |= PLANE_WM_EN;
		val |= level->plane_res_b;
		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}

	I915_WRITE(reg, val);
}
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
			       const struct skl_plane_wm *wm,
			       const struct skl_ddb_allocation *ddb,
			       enum plane_id plane_id)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
			    &ddb->plane[pipe][plane_id]);
	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
			    &ddb->y_plane[pipe][plane_id]);
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
				const struct skl_plane_wm *wm,
				const struct skl_ddb_allocation *ddb)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
			    &ddb->plane[pipe][PLANE_CURSOR]);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	if (l1->plane_en != l2->plane_en)
		return false;

	/* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;

	return (l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
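/*
 * Example of the half-open interval semantics above (assumed values):
 * entries [0, 100) and [100, 200) do not overlap, since b->start (100)
 * is not below a->end (100); entries [0, 101) and [100, 200) do.
 */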
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
				 const struct skl_ddb_entry *ddb,
				 int ignore)
{
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (i != ignore && entries[i] &&
		    skl_ddb_entries_overlap(ddb, entries[i]))
			return true;

	return false;
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}
static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_new_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}
static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_device *dev = state->dev;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	enum pipe pipe = intel_crtc->pipe;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
					&new_ddb->plane[pipe][plane_id]) &&
		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
					&new_ddb->y_plane[pipe][plane_id]))
			continue;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *intel_crtc;
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	uint32_t realloc_pipes = pipes_modified(state);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us. Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       state->acquire_ctx);
		if (ret)
			return ret;

		intel_state->active_pipe_changes = ~0;

		/*
		 * We usually only initialize intel_state->active_crtcs if
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!intel_state->modeset)
			intel_state->active_crtcs = dev_priv->active_crtcs;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceeds its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	/*
	 * We're not recomputing for the pipes not included in the commit, so
	 * make sure we start with the current state.
	 */
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);

		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
}
static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
		     struct skl_wm_values *src,
		     enum pipe pipe)
{
	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
	       sizeof(dst->ddb.y_plane[pipe]));
	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
	       sizeof(dst->ddb.plane[pipe]));
}
static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
	const struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	const struct intel_plane *intel_plane;
	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	int i;

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum pipe pipe = intel_crtc->pipe;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			enum plane_id plane_id = intel_plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_ddb->plane[pipe][plane_id];
			new = &new_ddb->plane[pipe][plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
					 intel_plane->base.base.id,
					 intel_plane->base.name,
					 old->start, old->end,
					 new->start, new->end);
		}
	}
}
static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_wm_values *results = &intel_state->wm_results;
	struct drm_device *dev = state->dev;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/*
	 * When we distrust bios wm we always need to recompute to set the
	 * expected DDB allocations for each CRTC.
	 */
	if (to_i915(dev)->wm.distrust_bios_wm)
		changed = true;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation.  Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i)
		changed = true;

	if (!changed)
		return 0;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME: Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);
		const struct skl_pipe_wm *old_pipe_wm =
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
					 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
	}

	skl_print_wm_changes(state);

	return 0;
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc_state *cstate)
{
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
	enum plane_id plane_id;

	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (plane_id != PLANE_CURSOR)
			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
					   ddb, plane_id);
		else
			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
					    ddb);
	}
}
static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &state->wm_results;
	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);

	skl_copy_wm_for_pipe(hw_vals, results, pipe);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(uint32_t val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	uint32_t val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!intel_crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *cstate;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		cstate = to_intel_crtc_state(crtc->state);

		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);

		if (intel_crtc->active)
			hw->dirty_pipes |= drm_crtc_mask(crtc);
	}

	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
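/*
 * Example expansion of the token-pasting macros above:
 *   _FW_WM(tmp, PLANEA)
 * expands to
 *   ((tmp & DSPFW_PLANEA_MASK) >> DSPFW_PLANEA_SHIFT)
 * i.e. the plane name selects both the mask and the shift constant.
 */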
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	uint32_t tmp;

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
void g4x_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

	out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		      wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
		      yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0],
			      wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
	u32 val;

	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(dev_priv)) {
		dev_priv->ipc_enabled = false;
		return;
	}

	val = I915_READ(DISP_ARB_CTL2);

	if (dev_priv->ipc_enabled)
		val |= DISP_IPC_ENABLE;
	else
		val &= ~DISP_IPC_ENABLE;

	I915_WRITE(DISP_ARB_CTL2, val);
}

void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	dev_priv->ipc_enabled = false;
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = true;
	intel_enable_ipc(dev_priv);
}
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
5865 bool ironlake_set_drps(struct drm_i915_private
*dev_priv
, u8 val
)
5869 lockdep_assert_held(&mchdev_lock
);
5871 rgvswctl
= I915_READ16(MEMSWCTL
);
5872 if (rgvswctl
& MEMCTL_CMD_STS
) {
5873 DRM_DEBUG("gpu busy, RCS change rejected\n");
5874 return false; /* still busy with another command */
5877 rgvswctl
= (MEMCTL_CMD_CHFREQ
<< MEMCTL_CMD_SHIFT
) |
5878 (val
<< MEMCTL_FREQ_SHIFT
) | MEMCTL_SFCAVM
;
5879 I915_WRITE16(MEMSWCTL
, rgvswctl
);
5880 POSTING_READ16(MEMSWCTL
);
5882 rgvswctl
|= MEMCTL_CMD_STS
;
5883 I915_WRITE16(MEMSWCTL
, rgvswctl
);
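/*
 * DRPS is the Ironlake-era dynamic render frequency mechanism: the
 * enable path below reads the min/max/start frequency bins out of
 * MEMMODECTL, programs the evaluation intervals and thresholds, and then
 * switches the hardware into software mode so that ironlake_set_drps()
 * can request frequency changes on behalf of the IPS driver.
 */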
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (INTEL_GEN(dev_priv) >= 9) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}

static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 &&
		    val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq &&
		    val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq &&
			 val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
		    val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		goto skip_hw_write;

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}

static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

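/*
 * On VLV/CHV the frequency request is written through the punit rather
 * than GEN6_RPNSWREQ; Cherryview additionally appears to accept only
 * even frequency values, hence the WARN on odd requests below.
 */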
static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (err)
			return err;

		gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;
	int err;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	err = valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);

	if (err)
		DRM_ERROR("Failed to set RPS for idle\n");
}

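/*
 * gen6_rps_busy()/gen6_rps_idle() bracket periods of GPU activity: on
 * becoming busy we re-arm the RPS interrupts and jump to at least the
 * efficient frequency (RPe) as a starting point, and on going idle we
 * drop to the idle frequency and mask all RPS interrupts so that no
 * further reclocking happens while asleep.
 */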
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		u8 freq;

		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Use the user's desired frequency as a guide, but for better
		 * performance, jump directly to RPe as our starting frequency.
		 */
		freq = max(dev_priv->rps.cur_freq,
			   dev_priv->rps.efficient_freq);

		if (intel_set_rps(dev_priv,
				  clamp(freq,
					dev_priv->rps.min_freq_softlimit,
					dev_priv->rps.max_freq_softlimit)))
			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_boost(struct drm_i915_gem_request *rq,
		    struct intel_rps_client *rps)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned long flags;
	bool boost;

	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!i915->rps.enabled)
		return;

	boost = false;
	spin_lock_irqsave(&rq->lock, flags);
	if (!rq->waitboost && !i915_gem_request_completed(rq)) {
		atomic_inc(&i915->rps.num_waiters);
		rq->waitboost = true;
		boost = true;
	}
	spin_unlock_irqrestore(&rq->lock, flags);
	if (!boost)
		return;

	if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
		schedule_work(&i915->rps.work);

	atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
}

int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	lockdep_assert_held(&dev_priv->rps.hw_lock);
	GEM_BUG_ON(val > dev_priv->rps.max_freq);
	GEM_BUG_ON(val < dev_priv->rps.min_freq);

	if (!dev_priv->rps.enabled) {
		dev_priv->rps.cur_freq = val;
		return 0;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = valleyview_set_rps(dev_priv, val);
	else
		err = gen6_set_rps(dev_priv, val);

	return err;
}

static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev_priv))
		DRM_DEBUG_DRIVER("Enabling RC6 states: "
				 "RC6 %s RC6p %s RC6pp %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
	else
		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}

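/*
 * On Broxton the BIOS is expected to have set up the RC6 context in
 * stolen memory, the per-engine idle timers, the pushbus and related
 * pause registers before the driver loads. The check below audits that
 * state and keeps RC6 disabled if anything looks wrong.
 */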
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
	u32 rc_ctl;
	int rc_sw_target;

	rc_ctl = I915_READ(GEN6_RC_CONTROL);
	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
		       RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					ggtt->stolen_reserved_size))) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}

int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev_priv)->gen < 6)
		return 0;

	if (!enable_rc6)
		return 0;

	if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		return 0;
	}

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev_priv))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
					 "(requested %d, valid %d)\n",
					 enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev_priv))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(dev_priv)) {
		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(dev_priv,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status) == 0)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}
}

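/*
 * reset_rps() deliberately invalidates the cached power band and current
 * frequency so that the following set() call cannot be short-circuited
 * by the "val != cur_freq" fast path and really reprograms the hardware.
 */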
static void reset_rps(struct drm_i915_private *dev_priv,
		      int (*set)(struct drm_i915_private *, u8))
{
	u8 freq = dev_priv->rps.cur_freq;

	/* force a reset */
	dev_priv->rps.power = -1;
	dev_priv->rps.cur_freq = -1;

	if (set(dev_priv, freq))
		DRM_ERROR("Failed to reset RPS to initial values\n");
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/

	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
	if (IS_SKYLAKE(dev_priv))
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
			   (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev_priv, rc6_mask);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6();
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev_priv)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev_priv, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	reset_rps(dev_priv, gen6_set_rps);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev_priv) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

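/*
 * The ring frequency table tells the PCU which ring and IA frequencies
 * to pair with each GPU frequency, so that memory bandwidth roughly
 * tracks GPU demand; the per-generation scaling rules are spelled out in
 * the comments inside the loop below.
 */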
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev_priv)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
	rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
	       FB_GFX_FREQ_FUSE_MASK);

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

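/*
 * On VLV/CHV the power context ("pctx") must live in stolen memory and
 * be pointed at by VLV_PCBR. The helpers below validate, allocate and
 * release that buffer; the BIOS may or may not have set it up already.
 */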
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}

static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory.  For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	i915_gem_object_put(dev_priv->vlv_pctx);
	dev_priv->vlv_pctx = NULL;
}

static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}

static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);
}

static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");
}

static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}

static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/*  Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/*  Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev_priv, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

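/*
 * Everything from here down to the intel_gpu_ips_* functions implements
 * the Ironlake IPS (Intelligent Power Sharing) interface: decoding the
 * fuse and frequency registers, estimating chipset and graphics power
 * from the EMON energy counters, and exporting hooks for the intel_ips
 * module to query and adjust GPU turbo limits.
 */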
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	lockdep_assert_held(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}

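/*
 * __i915_update_gfx_val() integrates the GFXEC energy counter over
 * wall-clock time (with a wrap-around fixup) into a running estimate of
 * graphics power draw, cached in dev_priv->ips.gfx_power.
 */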
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

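/*
 * The i915_gpu_raise/lower/busy/turbo_disable exports below are the
 * other half of the IPS contract: intel_ips calls them to nudge the
 * render frequency limits as thermal headroom changes. All of them
 * serialise on mchdev_lock; note that on Ironlake a smaller delay value
 * means a higher frequency, hence raise decrements max_delay and lower
 * increments it.
 */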
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (i915_mch_dev)
		ret = i915_mch_dev->gt.awake;
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

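/*
 * intel_init_emon() programs the EMON energy-monitoring weights (largely
 * empirically derived magic numbers) and caches the fuse-derived
 * correction factor that __i915_gfx_val() later uses when converting
 * counter deltas into milliwatts.
 */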
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915_modparams.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;

	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dev_priv->rps.min_freq_softlimit =
			max_t(int,
			      dev_priv->rps.efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (dev_priv->rps.max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			dev_priv->rps.max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_autoenable_gt_powersave(dev_priv);
}

void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915_modparams.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}

void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.enabled = true; /* force disabling */
	intel_disable_gt_powersave(dev_priv);

	gen6_reset_rps_interrupts(dev_priv);
}

void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.enabled))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_disable_rc6(dev_priv);
		gen9_disable_rps(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_disable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_disable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_disable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	}

	dev_priv->rps.enabled = false;
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* We shouldn't be disabling as we submit, so this should be less
	 * racy than it appears!
	 */
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
			gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	if (READ_ONCE(dev_priv->rps.enabled))
		goto out;

	rcs = dev_priv->engine[RCS];
	if (rcs->last_retired_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out:
	intel_runtime_pm_put(dev_priv);
}

void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

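/*
 * Editor's sketch (not driver code): the runtime PM reference taken here
 * must be dropped exactly once, whichever way the delayed work ends. The
 * pairing, as implied by the three routines above:
 *
 *	queue_delayed_work(...)           -> intel_runtime_pm_get_noresume()
 *	__intel_autoenable_gt_powersave() -> intel_runtime_pm_put() (out:)
 *	cancel_delayed_work_sync(...)     -> intel_runtime_pm_put()
 *	   (see intel_suspend_gt_powersave(), which only puts when the
 *	    work was still pending)
 */
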
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev_priv);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}

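/*
 * Editor's note on the _MASKED_BIT_* helpers used throughout these
 * routines: masked registers keep a write-enable mask in their upper
 * 16 bits, so only bits whose mask is set take effect and no
 * read-modify-write is needed. A sketch of the expansion, assuming
 * the definitions in i915_reg.h:
 *
 *	_MASKED_BIT_ENABLE(bit)  ~= (bit << 16) | bit
 *	_MASKED_BIT_DISABLE(bit) ~= (bit << 16)
 */
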
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}

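/*
 * Editor's sketch: callers pick the credit split per platform, e.g.
 * bdw_init_clock_gating() and chv_init_clock_gating() below use
 *
 *	gen8_set_l3sqc_credits(dev_priv, 30, 2);	(bdw)
 *	gen8_set_l3sqc_credits(dev_priv, 38, 2);	(chv)
 *
 * both under WaProgramL3SqcReg1Default.
 */
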
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}

static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	cnp_init_clock_gating(dev_priv);

	/* This is not a Wa. Enable for better image quality */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* WaEnableChickenDCPR:cnl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcWakeMemOn:cnl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
			   SARBUNIT_CLKGATE_DIS);

	/* Display WA #1133: WaFbcSkipSegments:cnl */
	val = I915_READ(ILK_DPFC_CHICKEN);
	val &= ~GLK_SKIP_SEG_COUNT_MASK;
	val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
	I915_WRITE(ILK_DPFC_CHICKEN, val);
}

static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WaFbcNukeOnHostModify:cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	ilk_init_lp_watermarks(dev_priv);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	ilk_init_lp_watermarks(dev_priv);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev_priv);
}

static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

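/*
 * Editor's sketch of the intended call order, as implied by the kernel-doc
 * above (install the hook once, then run it on each hardware init/resume):
 *
 *	intel_init_clock_gating_hooks(dev_priv);
 *	...
 *	intel_init_clock_gating(dev_priv);	(invokes the hook, see above)
 */
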
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

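/*
 * Editor's note: everything above only installs function pointers; e.g. on
 * a gen9 part a later flip ends up going through
 *
 *	dev_priv->display.initial_watermarks == skl_initial_wm
 *
 * so the per-platform choice is paid once at init rather than on every
 * watermark update.
 */
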
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
	       I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
	       I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
				 mbox, __builtin_return_address(0));
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
			  mbox, __builtin_return_address(0));
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
				 mbox, __builtin_return_address(0), status);
		return status;
	}

	return 0;
}

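/*
 * Editor's sketch (not driver code): a minimal mailbox read under the
 * required lock. Note that *val is also the request parameter on entry
 * and holds the reply on success:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *	if (ret == 0)
 *		DRM_DEBUG_DRIVER("OC params: 0x%08x\n", val);
 */
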
int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
				 val, mbox, __builtin_return_address(0));
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
			  val, mbox, __builtin_return_address(0));
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
				 val, mbox, __builtin_return_address(0), status);
		return status;
	}

	return 0;
}

static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}

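/*
 * Editor's sketch: a typical use is handshaking a cdclk change (see
 * intel_cdclk.c for the real call site; the constants are assumed here):
 *
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *
 * i.e. keep resending the request until the masked reply reads back as
 * "ready", with a 3 ms base timeout.
 */
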
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

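/*
 * Editor's worked example for byt_gpu_freq(), assuming rps.gpll_ref_freq
 * is in kHz: an opcode of 0xc0 gives N = 0xc0 - 0xb7 = 9, so the GPU runs
 * at 9 * gpll_ref_freq / 1000 MHz; byt_freq_opcode() is the exact inverse.
 * The chv variants differ only in that N = opcode / 2 and the CU (slow)
 * clock is half of CU2x, which is why chv opcodes must be even.
 */
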
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

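/*
 * Editor's worked example, assuming GT_FREQUENCY_MULTIPLIER == 50 and
 * GEN9_FREQ_SCALER == 3 as defined in i915_drv.h: on gen9 an RPS opcode
 * is in 50/3 MHz (~16.67 MHz) units, so
 *
 *	intel_gpu_freq(dev_priv, 27)    == 450	(MHz)
 *	intel_freq_opcode(dev_priv, 450) == 27
 *
 * while on pre-gen9 big-core platforms the unit is a flat 50 MHz.
 */
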
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	atomic_set(&dev_priv->rps.num_waiters, 0);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}

static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/* The registers accessed do not need forcewake. We borrow the
	 * uncore lock to prevent concurrent access to the range register.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);

	/* vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);

	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * now.
	 */

	spin_unlock_irq(&dev_priv->uncore.lock);

	return lower | (u64)upper << 8;
}

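/*
 * Editor's worked example: the upper window holds bits 39:8 of the 40-bit
 * counter and the lower window bits 31:0, so the two reads overlap in bits
 * 31:8; the retry loop above only accepts a stable upper read, which keeps
 * the overlap consistent. E.g. upper = 0x12345678 and lower = 0x345678ab
 * stitch to (0x12345678ull << 8) | 0x345678ab = 0x12345678ab.
 */
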
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	u64 time_hw, units, div;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1000;
		div = dev_priv->czclk_freq;

		time_hw = vlv_residency_raw(dev_priv, reg);
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1000;
		div = 1200;		/* 833.33ns */

		time_hw = I915_READ(reg);
	} else {
		units = 128000; /* 1.28us */
		div = 100000;

		time_hw = I915_READ(reg);
	}

	intel_runtime_pm_put(dev_priv);
	return DIV_ROUND_UP_ULL(time_hw * units, div);
}

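/*
 * Editor's worked example for the conversion above: on most platforms the
 * counter ticks every 1.28 us, so a raw value of 100000 reads as
 * 100000 * 128000 / 100000 = 128000 us of RC6 time; on gen9 LP the tick is
 * 833.33 ns, i.e. raw * 1000 / 1200 us.
 */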