/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in the
 * latency required to enter and leave RC6, and in the voltage consumed by
 * the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
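/*
 * Illustrative note (not part of the original source): the flags above are
 * meant to be OR'd into a single request mask, e.g.
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) requests normal and deep RC6,
 * while a mask of 0 leaves RC6 disabled entirely.
 */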
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
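/*
 * Worked expansion (illustrative, not from the original source):
 * FW_WM(0x45, SR) becomes ((0x45 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK),
 * i.e. the watermark value is shifted into the SR bitfield and anything
 * that overflows the field is masked off.
 */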
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
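/*
 * Worked example (illustrative, not from the original source): with
 * lo_shift = 8 and hi_shift = 4, VLV_FIFO_START() takes bits 15:8 of
 * dsparb as the low eight bits and bit 4 of dsparb2 as the ninth bit,
 * yielding a FIFO start offset in the 0..511 range.
 */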
static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int plane)
{
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
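/*
 * Worked example (illustrative numbers, not from the original source):
 * a 148500 kHz pixel clock at 4 bytes per pixel with 5000 ns latency
 * drains (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes during the
 * latency window, i.e. DIV_ROUND_UP(2960, 64) = 47 cachelines, so the
 * returned watermark is fifo_size - (47 + guard_size), subject to the
 * max_wm/default_wm clamping above.
 */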
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}
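/*
 * Note (illustrative, not from the original source): the DSPHOWM writes
 * above carry the high bits of watermarks wider than the 8-bit DSPFW
 * fields -- e.g. a 9-bit primary watermark of 0x1f3 is programmed as 0xf3
 * through FW_WM_VLV() plus a 0x1 high bit through FW_WM(... >> 8,
 * PLANEA_HI).
 */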
enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
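/*
 * Worked example (illustrative numbers): with a 200000 kHz pixel rate,
 * htotal of 2200 and a latency of 30 (i.e. 3 us),
 * (30 * 200000) / (2200 * 10000) = 0 whole lines drain during the latency
 * window, so (0 + 1) line of horiz_pixels * cpp bytes is fetched, rounded
 * up to 64-byte cachelines.
 */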
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
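/*
 * Resulting latency table (summary of the code above): PM2 = 3 us on both
 * VLV and CHV; CHV additionally exposes PM5 = 12 us and DDR DVFS = 33 us,
 * which is why max_level differs between the two platforms.
 */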
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = state->base.fb->format->cpp[0];
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += state->base.fb->format->cpp[0];
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = state->base.fb->format->cpp[0];
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
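/*
 * Example split (illustrative numbers): two visible 4-byte planes share
 * the 511-entry FIFO as 511 * 4 / 8 = 255 entries each, and the single
 * leftover entry is handed out by the "spread the remainder" loop above.
 */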
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size =
			INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = dev_priv->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_update_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(crtc);

	vlv_write_wm_values(crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
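/*
 * Contrast of the two methods (explanatory note, not from the original
 * source): method1 sizes the watermark purely from bytes transferred
 * during the latency window (pixel_rate * cpp * latency), while method2
 * rounds the window up to whole scanlines and is the safer bound for long
 * latencies; callers below use min(method1, method2) where both are
 * computed.
 */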
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;
	int cpp;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	uint32_t method1, method2;
	int cpp;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->base.visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk == 0))
		return 0;

	/* The WMs are computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  uint16_t wm[8])
{
	if (IS_GEN9(dev_priv)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from the
		 * punit when level 0 response data is 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
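
/*
 * Illustrative example (hypothetical values): the gen9 sanitize pass
 * above turns a punit response of wm[] = { 4, 6, 0, 10, ... } into
 * { 4, 6, 0, 0, ... }: once a level reports 0us, every deeper level is
 * zeroed so we never enable a watermark level with unknown latency.
 */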
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev_priv))
		wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
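
/*
 * Illustrative example (hypothetical values): after the conversions
 * above, a gen9 raw value of 4 prints as "4.0 usec" (4 * 10 tenths),
 * while a pre-gen9 WM1+ value of 4 prints as "2.0 usec" (4 * 5 tenths,
 * since those levels are stored in 0.5us units); pre-gen9 WM0 values
 * are already in 0.1us units.
 */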
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
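
/*
 * Illustrative note: the floor of 12 above is 1.2us for WM0 (0.1us
 * units); for WM1+ the same floor becomes DIV_ROUND_UP(12, 5) = 3 in
 * 0.5us units, i.e. 1.5us, because those levels use coarser units.
 */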
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev_priv))
		snb_wm_latency_quirk(dev_priv);
}
static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->base.visible;
		pipe_wm->sprites_scaled = sprstate->base.visible &&
			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state.  These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(to_i915(dev));

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}
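
/*
 * Illustrative example (hypothetical values): if the currently active
 * state uses pri_val = 40 and the new state computed pri_val = 28, the
 * intermediate value is max(40, 28) = 40, so the FIFO never runs below
 * what either configuration needs while the update is in flight.
 */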
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
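
/*
 * Illustrative mapping: on HSW/BDW (5 levels) with wm[4] enabled the
 * LP1/LP2/LP3 registers hold levels 1/3/4, skipping level 2; otherwise
 * they hold levels 1/2/3.
 */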
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(to_i915(dev));
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
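
/*
 * Illustrative layout (three pipes): WM0 pipe bits live in [2:0],
 * linetime bits in [10:8], LP1-LP3 in [18:16], FBC at bit 24 and DDB
 * partitioning at bit 25; anything that invalidates LP1+ sets all
 * three WM_DIRTY_LP bits at once via WM_DIRTY_LP_ALL.
 */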
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
#define SKL_SAGV_BLOCK_TIME	30 /* µs */

/*
 * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
 * other universal planes are in indices 1..n.  Note that this may leave unused
 * indices between the top "sprite" plane and the cursor.
 */
static int
skl_wm_plane_id(const struct intel_plane *plane)
{
	switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
	case DRM_PLANE_TYPE_CURSOR:
		return PLANE_CURSOR;
	case DRM_PLANE_TYPE_OVERLAY:
		return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
		return plane->plane;
	}
}
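
/*
 * Illustrative mapping: on a pipe with two sprite planes this yields
 * primary = 0, sprite A = 1, sprite B = 2 and cursor = PLANE_CURSOR
 * (I915_MAX_PLANES - 1); any slots in between stay unused.
 */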
/*
 * FIXME: We still don't have the proper code to detect if we need to apply
 * the WA, so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    IS_KABYLAKE(dev_priv))
		return true;

	return false;
}
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	if (IS_KABYLAKE(dev_priv))
		return true;

	if (IS_SKYLAKE(dev_priv) &&
	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
		return true;

	return false;
}
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	DRM_DEBUG_KMS("Enabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for the SAGV when enabling */
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}
int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	DRM_DEBUG_KMS("Disabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}
bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *cstate;
	struct skl_plane_wm *wm;
	enum pipe pipe;
	int level, latency;

	if (!intel_has_sagv(dev_priv))
		return false;

	/*
	 * SKL workaround: bspec recommends we disable the SAGV when we have
	 * more than one pipe enabled
	 *
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight32(intel_state->active_crtcs) == 0)
		return true;
	else if (hweight32(intel_state->active_crtcs) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(intel_state->active_crtcs) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	cstate = to_intel_crtc_state(crtc->base.state);

	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
			{ }

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(intel_state) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If any of the planes on this pipe don't enable wm levels
		 * that incur memory latencies higher than 30µs we can't
		 * enable the SAGV
		 */
		if (latency < SKL_SAGV_BLOCK_TIME)
			return false;
	}

	return true;
}
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
	WARN_ON(ddb_size == 0);

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged.  Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}
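
/*
 * Illustrative example (hypothetical 896-block DDB): the 4-block bypass
 * reservation leaves 892 blocks, so two active pipes get 446 blocks
 * each, with the second active pipe (nth_active_pipe = 1) starting at
 * block 446.
 */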
static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}
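
/*
 * Illustrative decode (hypothetical register value): the hardware keeps
 * an inclusive end block in bits 25:16 and the start block in bits 9:0,
 * so 0x01bf0000 decodes to start = 0 and end = 0x1bf + 1 = 448 in the
 * driver's exclusive-end skl_ddb_entry convention.
 */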
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	memset(ddb, 0, sizeof(*ddb));

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_universal_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);

		intel_display_power_put(dev_priv, power_domain);
	}
}
/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint32_t
skl_plane_downscale_amount(const struct intel_plane_state *pstate)
{
	uint32_t downscale_h, downscale_w;
	uint32_t src_w, src_h, dst_w, dst_h;

	if (WARN_ON(!pstate->base.visible))
		return DRM_PLANE_HELPER_NO_SCALING;

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	src_w = drm_rect_width(&pstate->base.src);
	src_h = drm_rect_height(&pstate->base.src);
	dst_w = drm_rect_width(&pstate->base.dst);
	dst_h = drm_rect_height(&pstate->base.dst);
	if (drm_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);

	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

	/* Provide result in 16.16 fixed point */
	return (uint64_t)downscale_w * downscale_h >> 16;
}
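
/*
 * Illustrative example: since src is 16.16 fixed point and dst is a
 * whole integer, src_w / dst_w is itself 16.16. Downscaling 3840x2160
 * to 1920x1080 gives downscale_w = downscale_h = 2 << 16, and the
 * 64-bit product shifted right by 16 is 4 << 16, i.e. a total
 * downscale amount of 4.0.
 */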
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t down_scale_amount, data_rate;
	uint32_t width = 0, height = 0;
	struct drm_framebuffer *fb;
	u32 format;

	if (!intel_pstate->base.visible)
		return 0;

	fb = pstate->fb;
	format = fb->format->format;

	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)	/* y-plane data rate */
			data_rate = width * height *
				fb->format->cpp[0];
		else	/* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				fb->format->cpp[1];
	} else {
		/* for packed formats */
		data_rate = width * height * fb->format->cpp[0];
	}

	down_scale_amount = skl_plane_downscale_amount(intel_pstate);

	return (uint64_t)data_rate * down_scale_amount >> 16;
}
/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
				 unsigned *plane_data_rate,
				 unsigned *plane_y_data_rate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_plane *plane;
	const struct intel_plane *intel_plane;
	const struct drm_plane_state *pstate;
	unsigned int rate, total_data_rate = 0;
	int id;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
		id = skl_wm_plane_id(to_intel_plane(plane));
		intel_plane = to_intel_plane(plane);

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		plane_data_rate[id] = rate;

		total_data_rate += rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		plane_y_data_rate[id] = rate;

		total_data_rate += rate;
	}

	return total_data_rate;
}
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->format->format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 8;

	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);

	/* Halve UV plane width and height for NV12 */
	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->format->format == DRM_FORMAT_NV12 && !y)
		plane_bpp = fb->format->cpp[1];
	else
		plane_bpp = fb->format->cpp[0];

	if (drm_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}
static void
skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
		 uint16_t *minimum, uint16_t *y_minimum)
{
	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
		struct intel_plane *intel_plane = to_intel_plane(plane);
		int id = skl_wm_plane_id(intel_plane);

		if (id == PLANE_CURSOR)
			continue;

		if (!pstate->visible)
			continue;

		minimum[id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
	}

	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
}
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
	uint16_t alloc_size, start;
	uint16_t minimum[I915_MAX_PLANES] = {};
	uint16_t y_minimum[I915_MAX_PLANES] = {};
	unsigned int total_data_rate;
	int num_active;
	int id, i;
	unsigned plane_data_rate[I915_MAX_PLANES] = {};
	unsigned plane_y_data_rate[I915_MAX_PLANES] = {};

	/* Clear the partitioning for disabled planes. */
	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}

	skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);

	/*
	 * 1. Allocate the minimum required blocks for each active plane
	 * and allocate the cursor, it doesn't require extra allocation
	 * proportional to the data rate.
	 */

	for (i = 0; i < I915_MAX_PLANES; i++) {
		alloc_size -= minimum[i];
		alloc_size -= y_minimum[i];
	}

	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate,
							   plane_data_rate,
							   plane_y_data_rate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for (id = 0; id < I915_MAX_PLANES; id++) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;

		if (id == PLANE_CURSOR)
			continue;

		data_rate = plane_data_rate[id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][id].start = start;
			ddb->plane[pipe][id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		y_data_rate = plane_y_data_rate[id];

		y_plane_blocks = y_minimum[id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}
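
/*
 * Illustrative example (hypothetical rates): with 400 blocks left after
 * the minimums and data rates of 300 and 100 on two planes,
 * div_u64(400 * 300, 400) = 300 and div_u64(400 * 100, 400) = 100
 * extra blocks land on top of each plane's minimum; the integer
 * truncation here is why the FIXME above notes that a few blocks may
 * stay unallocated.
 */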
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t latency, uint32_t plane_blocks_per_line)
{
	uint32_t ret;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
		plane_blocks_per_line;

	return ret;
}
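
/*
 * Illustrative example (hypothetical numbers): method1 covers the
 * latency at the raw pixel rate; with plane_pixel_rate = 148500 (kHz),
 * cpp = 4 and latency = 5us, 5 * 148500 * 4 / 512 = 5800 and
 * DIV_ROUND_UP(5800, 1000) = 6 blocks. method2 instead counts how many
 * full lines fit in the latency window and multiplies by
 * plane_blocks_per_line.
 */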
static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!pstate->base.visible))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
	downscale_amount = skl_plane_downscale_amount(pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}
static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;
	uint32_t y_tile_minimum, y_min_scanlines;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);

	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
		*enabled = false;
		return 0;
	}

	if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
		latency += 15;

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = fb->format->cpp[0];
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	if (drm_rotation_90_or_270(pstate->rotation)) {
		int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
			fb->format->cpp[1] :
			fb->format->cpp[0];

		switch (cpp) {
		case 1:
			y_min_scanlines = 16;
			break;
		case 2:
			y_min_scanlines = 8;
			break;
		case 4:
			y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(cpp);
			return -EINVAL;
		}
	} else {
		y_min_scanlines = 4;
	}

	if (apply_memory_bw_wa)
		y_min_scanlines *= 2;

	plane_bytes_per_line = width * cpp;
	if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
		plane_blocks_per_line =
		      DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
		plane_blocks_per_line /= y_min_scanlines;
	} else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
					+ 1;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 latency,
				 plane_blocks_per_line);

	y_tile_minimum = plane_blocks_per_line * y_min_scanlines;

	if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
		    (plane_bytes_per_line / 512 < 1))
			selected_result = method2;
		else if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
			res_blocks += y_tile_minimum;
			res_lines += y_min_scanlines;
		} else {
			res_blocks++;
		}
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
				      to_intel_crtc(cstate->base.crtc)->pipe,
				      skl_wm_plane_id(to_intel_plane(pstate->plane)),
				      res_blocks, ddb_allocation, res_lines);
			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}
static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     struct intel_plane *intel_plane,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane = &intel_plane->base;
	struct intel_plane_state *intel_pstate = NULL;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int ret;
	int i = skl_wm_plane_id(intel_plane);

	if (state)
		intel_pstate =
			intel_atomic_get_existing_plane_state(state,
							      intel_plane);

	/*
	 * Note: If we start supporting multiple pending atomic commits against
	 * the same planes/CRTC's in the future, plane->state will no longer be
	 * the correct pre-state to use for the calculations here and we'll
	 * need to change where we get the 'unchanged' plane data from.
	 *
	 * For now this is fine because we only allow one queued commit against
	 * a CRTC.  Even if the plane isn't modified by this transaction and we
	 * don't have a plane lock, we still have the CRTC's lock, so we know
	 * that no other transactions are racing with us to update it.
	 */
	if (!intel_pstate)
		intel_pstate = to_intel_plane_state(plane->state);

	WARN_ON(!intel_pstate->base.fb);

	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

	ret = skl_compute_plane_wm(dev_priv,
				   cstate,
				   intel_pstate,
				   ddb_blocks,
				   level,
				   &result->plane_res_b,
				   &result->plane_res_l,
				   &result->plane_en);
	if (ret)
		return ret;

	return 0;
}
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	uint32_t pixel_rate;

	if (!cstate->base.active)
		return 0;

	pixel_rate = ilk_pipe_pixel_rate(cstate);

	if (WARN_ON(pixel_rate == 0))
		return 0;

	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
			    pixel_rate);
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct skl_plane_wm *wm;
	int level, max_level = ilk_wm_max_level(dev_priv);
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];

		for (level = 0; level <= max_level; level++) {
			ret = skl_compute_wm_level(dev_priv, ddb, cstate,
						   intel_plane, level,
						   &wm->wm[level]);
			if (ret)
				return ret;
		}
		skl_compute_transition_wm(cstate, &wm->trans_wm);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	uint32_t val = 0;

	if (level->plane_en) {
		val |= PLANE_WM_EN;
		val |= level->plane_res_b;
		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}

	I915_WRITE(reg, val);
}
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
			       const struct skl_plane_wm *wm,
			       const struct skl_ddb_allocation *ddb,
			       int plane)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
			   &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
			    &ddb->plane[pipe][plane]);
	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
			    &ddb->y_plane[pipe][plane]);
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
				const struct skl_plane_wm *wm,
				const struct skl_ddb_allocation *ddb)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
			    &ddb->plane[pipe][PLANE_CURSOR]);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	if (l1->plane_en != l2->plane_en)
		return false;

	/* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;

	return (l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
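
/*
 * Illustrative note: entries are half-open [start, end), so {0, 446}
 * and {446, 892} merely touch at block 446 and do not overlap, while
 * {0, 447} and {446, 892} do.
 */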
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
				 const struct skl_ddb_entry *ddb,
				 int ignore)
{
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (i != ignore && entries[i] &&
		    skl_ddb_entries_overlap(ddb, entries[i]))
			return true;

	return false;
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}
static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}
static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_device *dev = state->dev;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	enum pipe pipe = intel_crtc->pipe;
	int id;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
		id = skl_wm_plane_id(to_intel_plane(plane));

		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
					&new_ddb->plane[pipe][id]) &&
		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
					&new_ddb->y_plane[pipe][id]))
			continue;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *intel_crtc;
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	uint32_t realloc_pipes = pipes_modified(state);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       state->acquire_ctx);
		if (ret)
			return ret;

		intel_state->active_pipe_changes = ~0;

		/*
		 * We usually only initialize intel_state->active_crtcs if
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!intel_state->modeset)
			intel_state->active_crtcs = dev_priv->active_crtcs;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceed its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	/*
	 * We're not recomputing for the pipes not included in the commit, so
	 * make sure we start with the current state.
	 */
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);

		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
}
static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
		     struct skl_wm_values *src,
		     enum pipe pipe)
{
	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
	       sizeof(dst->ddb.y_plane[pipe]));
	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
	       sizeof(dst->ddb.plane[pipe]));
}
static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
	const struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	const struct intel_plane *intel_plane;
	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	int id;
	int i;

	for_each_crtc_in_state(state, crtc, cstate, i) {
		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum pipe pipe = intel_crtc->pipe;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			const struct skl_ddb_entry *old, *new;

			id = skl_wm_plane_id(intel_plane);
			old = &old_ddb->plane[pipe][id];
			new = &new_ddb->plane[pipe][id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
					 intel_plane->base.base.id,
					 intel_plane->base.name,
					 old->start, old->end,
					 new->start, new->end);
		}
	}
}
static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_wm_values *results = &intel_state->wm_results;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation.  Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME: Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);
		const struct skl_pipe_wm *old_pipe_wm =
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
					 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
	}

	skl_print_wm_changes(state);

	return 0;
}
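/*
 * skl_compute_wm() runs entirely in the atomic check phase: it only computes
 * new DDB/watermark values into the atomic state.  The results are programmed
 * to the hardware later, per pipe, by skl_initial_wm() and
 * skl_atomic_update_crtc_wm() below, keyed off the dirty_pipes mask set here.
 */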
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc_state *cstate)
{
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
	int plane;

	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);

	for_each_universal_plane(dev_priv, pipe, plane)
		skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);

	skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
}
static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &state->wm_results;
	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);

	skl_copy_wm_for_pipe(hw_vals, results, pipe);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(uint32_t val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}
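/*
 * The PLANE_WM/CUR_WM registers thus pack three fields: an enable bit
 * (PLANE_WM_EN), a block count in the low bits (PLANE_WM_BLOCKS_MASK) and
 * a line count at PLANE_WM_LINES_SHIFT (PLANE_WM_LINES_MASK).
 */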
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct skl_plane_wm *wm;
	enum pipe pipe = intel_crtc->pipe;
	int level, id, max_level;
	uint32_t val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		id = skl_wm_plane_id(intel_plane);
		wm = &out->planes[id];

		for (level = 0; level <= max_level; level++) {
			if (id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!intel_crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *cstate;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		cstate = to_intel_crtc_state(crtc->state);

		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);

		if (intel_crtc->active)
			hw->dirty_pipes |= drm_crtc_mask(crtc);
	}

	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
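/*
 * For example, _FW_WM(tmp, SR) expands to
 * ((tmp & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT); the _VLV variant only differs
 * in picking the VLV-specific mask for the same field name.
 */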
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].primary =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].cursor =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
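/*
 * The DSPFW fields themselves are narrow; DSPHOWM supplies additional
 * high-order bits for each watermark, which is why the _HI values are
 * OR'ed in above at bit 8 (bit 9 for the SR plane).
 */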
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
			break;
		}
	}

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_pipe(dev_priv, pipe)
		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the CRTC on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
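/*
 * As a rough worked example of the normal calculation above: a 100 MHz
 * dotclock at 4 bytes per pixel with 10us of latency needs
 * 100e6 * 4 * 10e-6 = 4000 bytes of FIFO headroom, before rounding up and
 * adding the extra clock-crossing entries.
 */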
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}
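/*
 * E.g. on pre-gen9 with val at the minimum softlimit, the returned word is
 * (max_freq_softlimit << 24) | (min_freq_softlimit << 16); gen9 simply uses
 * the narrower 23/14 bit positions for the same two fields.
 */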
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 &&
		    val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq &&
		    val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq &&
			 val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
		    val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}
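/*
 * E.g. in the BETWEEN state the up-threshold programmed above is
 * GT_INTERVAL_FROM_US(dev_priv, 13000 * 90 / 100): the GPU must be busy for
 * at least 11.7ms of a 13ms evaluation interval before we upclock.
 */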
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
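/*
 * At the softlimit boundaries this masks off the interrupts that could only
 * push us out of range: at the minimum frequency only up events stay
 * enabled, at the maximum only down events.
 */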
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		return;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (!IS_CHERRYVIEW(dev_priv))
			gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * If Gfx is idle, then
 * 1. Forcewake the Media well.
 * 2. Request the idle freq.
 * 3. Release forcewake of the Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* Wake up the media well, as that takes a lot less
	 * power than the render well. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Ensure we start at the user's desired frequency */
		intel_set_rps(dev_priv,
			      clamp(dev_priv->rps.cur_freq,
				    dev_priv->rps.min_freq_softlimit,
				    dev_priv->rps.max_freq_softlimit));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->gt.awake &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}
void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		valleyview_set_rps(dev_priv, val);
	else
		gen6_set_rps(dev_priv, val);
}
static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	/* We're taking forcewake before disabling RC6,
	 * which is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev_priv))
		DRM_DEBUG_DRIVER("Enabling RC6 states: "
				 "RC6 %s RC6p %s RC6pp %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
	else
		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
	u32 rc_ctl;
	int rc_sw_target;

	rc_ctl = I915_READ(GEN6_RC_CONTROL);
	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
		       RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					ggtt->stolen_reserved_size))) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev_priv)->gen < 6)
		return 0;

	if (!enable_rc6)
		return 0;

	if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		return 0;
	}

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev_priv))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
					 "(requested %d, valid %d)\n",
					 enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev_priv))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
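/*
 * E.g. a user asking for enable_rc6=7 (RC6 | RC6p | RC6pp) on a part without
 * RC6p support is trimmed down to just INTEL_RC6_ENABLE, with the adjustment
 * noted in the debug log.
 */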
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev_priv)) {
		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(dev_priv,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status) == 0)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}
}
static void reset_rps(struct drm_i915_private *dev_priv,
		      void (*set)(struct drm_i915_private *, u8))
{
	u8 freq = dev_priv->rps.cur_freq;

	/* force a reset */
	dev_priv->rps.power = -1;
	dev_priv->rps.cur_freq = -1;

	set(dev_priv, freq);
}
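/*
 * Poisoning rps.power and rps.cur_freq with -1 guarantees that the set()
 * callback sees both a power-state change and a frequency change, so all
 * thresholds and request registers get fully reprogrammed.
 */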
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		/*
		 * BIOS could leave the Hw Turbo enabled, so need to explicitly
		 * clear out the Control register just to avoid inconsistency
		 * with debugfs interface, which will show Turbo as enabled
		 * only and that is not expected by the User after adding the
		 * WaGsvDisableTurbo. Apart from this there is no problem even
		 * if the Turbo is left enabled in the Control register, as the
		 * Up/Down interrupts would remain masked.
		 */
		gen9_disable_rps(dev_priv);
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return;
	}

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		   GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/

	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
	if (IS_SKYLAKE(dev_priv))
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	/* WaRsUseTimeoutMode:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	} else {
		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);
	}

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
			   (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev_priv, rc6_mask);
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6();
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev_priv)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev_priv, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	reset_rps(dev_priv, gen6_set_rps);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev_priv) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev_priv)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
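/*
 * E.g. on Haswell a GPU frequency of 20 (1 GHz in 50MHz units) asks for
 * ring_freq = mult_frac(20, 5, 4) = 25, subject to the DDR-derived floor in
 * min_ring_freq; older parts instead scale the IA frequency request down as
 * the GPU frequency drops.
 */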
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}
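/*
 * RP0 is fused per SKU here: the EU count selects which slice of the
 * FB_GFX_FMAX_AT_VMAX fuse register holds the max frequency for this part,
 * with the (2 * 8) field as the fallback.
 */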
static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}
static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}
static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}
/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory.  For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	i915_gem_object_put(dev_priv->vlv_pctx);
	dev_priv->vlv_pctx = NULL;
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);
}
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	/* PUnit validated range is only [RPe, RP0] */
	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");
}
static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}

static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, val, rc6_mode = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev_priv, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

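/*
 * Illustrative sketch (not from the original source): a worked example of
 * the PXVFREQ decode above. For vidfreq = 0x00112233 the fields are
 * div = 0x11, post = 2 and pre = 3, giving 17 * 133333 / ((1 << 2) * 3) =
 * 2266661 / 12 = 188888 kHz. The helper name is hypothetical.
 */
static unsigned long __maybe_unused example_pxfreq_decode(void)
{
	return intel_pxfreq(0x00112233); /* 188888, per the arithmetic above */
}
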
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

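/*
 * Illustrative sketch (not from the original source): the chipset power
 * model above is linear, ret = (m * rate + c) / 10, with m and c looked up
 * from cparams[] by core multiplier (c_m) and render timestamp (r_t). E.g.
 * with the { 1, 1333, 301, 28664 } row and the energy counters advancing
 * by 50000 over a 100 ms window: rate = 500, so (301 * 500 + 28664) / 10 =
 * 17916. The helper name is hypothetical.
 */
static u64 __maybe_unused example_chipset_power(u64 count_delta,
						unsigned long ms, u32 m, u32 c)
{
	u64 rate = div_u64(count_delta, ms);

	return div_u64(m * rate + c, 10);
}
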
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}

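/*
 * Illustrative sketch (not from the original source): a worked example of
 * the PXVID decode above. _pxvid_to_vd(6) = (6 + 2) * 125 = 1000, while
 * codes 8..30 clamp to 31 first, giving (31 + 2) * 125 = 4125; on a mobile
 * part pvid_to_extvid() then subtracts the 1125 base, so 4125 maps to 3000.
 * The helper name is hypothetical.
 */
static u32 __maybe_unused example_pxvid_decode(struct drm_i915_private *dev_priv)
{
	return pvid_to_extvid(dev_priv, 16); /* 3000 on mobile, 4125 otherwise */
}
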
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

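/*
 * Illustrative sketch (not part of this file): the intel_ips driver consumes
 * the symbols exported above roughly like this, resolving them at runtime
 * with symbol_get() so that neither module hard-depends on the other. The
 * function name is hypothetical; drivers/platform/x86/intel_ips.c is the
 * real consumer.
 */
static unsigned long __maybe_unused example_ips_poll(void)
{
	typeof(i915_read_mch_val) *read_mch_val;
	unsigned long headroom = 0;

	read_mch_val = symbol_get(i915_read_mch_val);
	if (read_mch_val) {
		headroom = read_mch_val();
		symbol_put(i915_read_mch_val);
	}

	return headroom;
}
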
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (i915_mch_dev)
		ret = i915_mch_dev->gt.awake;
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;

	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dev_priv->rps.min_freq_softlimit =
			max_t(int,
			      dev_priv->rps.efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (dev_priv->rps.max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			dev_priv->rps.max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_autoenable_gt_powersave(dev_priv);
}

void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}

void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.enabled = true; /* force disabling */
	intel_disable_gt_powersave(dev_priv);

	gen6_reset_rps_interrupts(dev_priv);
}

void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.enabled))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_disable_rc6(dev_priv);
		gen9_disable_rps(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_disable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_disable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_disable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	}

	dev_priv->rps.enabled = false;
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* We shouldn't be disabling as we submit, so this should be less
	 * racy than it appears!
	 */
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	if (READ_ONCE(dev_priv->rps.enabled))
		goto out;

	rcs = dev_priv->engine[RCS];
	if (rcs->last_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request_no_flush(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out:
	intel_runtime_pm_put(dev_priv);
}

void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev_priv);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}

static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}

static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	ilk_init_lp_watermarks(dev_priv);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);
}

static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	ilk_init_lp_watermarks(dev_priv);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev_priv);
}

static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
	       I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
	       I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 10 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
	 * account for interrupts that could reduce the number of these
	 * requests.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 10);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}

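/*
 * Illustrative sketch (not from the original source): a caller takes
 * rps.hw_lock and keeps resending a request until the masked reply matches.
 * The mailbox, request and reply values below are placeholders, not real
 * PCODE IDs, and the helper name is hypothetical.
 */
static int __maybe_unused example_pcode_handshake(struct drm_i915_private *dev_priv)
{
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	/* resend request 0x1 to mailbox 0x7 until bit 0 of the reply is set */
	ret = skl_pcode_request(dev_priv, 0x7, 0x1, BIT(0), BIT(0), 3);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}
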
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

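/*
 * Illustrative sketch (not from the original source): a worked example of
 * the BYT conversion above, assuming a 19200 kHz GPLL reference. Opcode
 * 0xc8 decodes as (0xc8 - 0xb7) * 19200 / 1000 = 17 * 19.2 ~= 326 MHz, and
 * byt_freq_opcode(326) rounds back to 0xc8. The helper name is hypothetical.
 */
static void __maybe_unused example_byt_freq_roundtrip(struct drm_i915_private *dev_priv)
{
	int mhz = byt_gpu_freq(dev_priv, 0xc8);

	WARN_ON(byt_freq_opcode(dev_priv, mhz) != 0xc8);
}
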
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

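/*
 * Illustrative sketch (not from the original source): on CHV the opcode is
 * in CU2x (double) units, hence the extra factor of two in each direction.
 * With a 19200 kHz GPLL reference, opcode 52 decodes as
 * 19200 * 52 / (2 * 2 * 1000) ~= 250 MHz, and chv_freq_opcode(250) rounds
 * back to the even opcode 52. The helper name is hypothetical.
 */
static void __maybe_unused example_chv_freq_roundtrip(struct drm_i915_private *dev_priv)
{
	int mhz = chv_gpu_freq(dev_priv, 52);

	WARN_ON(chv_freq_opcode(dev_priv, mhz) != 52);
}
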
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

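/*
 * Illustrative sketch (not from the original source): on the platforms using
 * the plain scaling the RPS opcode is simply in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz), so opcode 0x10 is 800 MHz; gen9 opcodes
 * are in 50/GEN9_FREQ_SCALER MHz units instead. The helper name is
 * hypothetical and assumes one of the plain-scaling platforms.
 */
static void __maybe_unused example_freq_conversion(struct drm_i915_private *dev_priv)
{
	int mhz = intel_gpu_freq(dev_priv, 0x10);

	WARN_ON(intel_freq_opcode(dev_priv, mhz) != 0x10);
}
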
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}