]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/i915/intel_pm.c
drm/i915/skl: Fix has_ipc on skl and document WaDisableIPC.
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / intel_pm.c
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28 #include <linux/cpufreq.h>
29 #include <drm/drm_plane_helper.h>
30 #include "i915_drv.h"
31 #include "intel_drv.h"
32 #include "../../../platform/x86/intel_ips.h"
33 #include <linux/module.h>
34 #include <drm/drm_atomic_helper.h>
35
36 /**
37 * DOC: RC6
38 *
39 * RC6 is a special power stage which allows the GPU to enter an very
40 * low-voltage mode when idle, using down to 0V while at this stage. This
41 * stage is entered automatically when the GPU is idle when RC6 support is
42 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
43 *
44 * There are different RC6 modes available in Intel GPU, which differentiate
45 * among each other with the latency required to enter and leave RC6 and
46 * voltage consumed by the GPU in different states.
47 *
48 * The combination of the following flags define which states GPU is allowed
49 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
50 * RC6pp is deepest RC6. Their support by hardware varies according to the
51 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
52 * which brings the most power savings; deeper states save more power, but
53 * require higher latency to switch to and wake up.
54 */
55 #define INTEL_RC6_ENABLE (1<<0)
56 #define INTEL_RC6p_ENABLE (1<<1)
57 #define INTEL_RC6pp_ENABLE (1<<2)
58
59 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
60 {
61 if (HAS_LLC(dev_priv)) {
62 /*
63 * WaCompressedResourceDisplayNewHashMode:skl,kbl
64 * Display WA#0390: skl,kbl
65 *
66 * Must match Sampler, Pixel Back End, and Media. See
67 * WaCompressedResourceSamplerPbeMediaNewHashMode.
68 */
69 I915_WRITE(CHICKEN_PAR1_1,
70 I915_READ(CHICKEN_PAR1_1) |
71 SKL_DE_COMPRESSED_HASH_MODE);
72 }
73
74 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
75 I915_WRITE(CHICKEN_PAR1_1,
76 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
77
78 I915_WRITE(GEN8_CONFIG0,
79 I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
80
81 /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
82 I915_WRITE(GEN8_CHICKEN_DCPR_1,
83 I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
84
85 /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
86 /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
87 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
88 DISP_FBC_WM_DIS |
89 DISP_FBC_MEMORY_WAKE);
90
91 /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
92 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
93 ILK_DPFC_DISABLE_DUMMY0);
94
95 if (IS_SKYLAKE(dev_priv)) {
96 /* WaDisableDopClockGating */
97 I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
98 & ~GEN7_DOP_CLOCK_GATE_ENABLE);
99 }
100 }
101
102 static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
103 {
104 gen9_init_clock_gating(dev_priv);
105
106 /* WaDisableSDEUnitClockGating:bxt */
107 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
108 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
109
110 /*
111 * FIXME:
112 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
113 */
114 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
115 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
116
117 /*
118 * Wa: Backlight PWM may stop in the asserted state, causing backlight
119 * to stay fully on.
120 */
121 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
122 PWM1_GATING_DIS | PWM2_GATING_DIS);
123 }
124
125 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
126 {
127 u32 val;
128 gen9_init_clock_gating(dev_priv);
129
130 /*
131 * WaDisablePWMClockGating:glk
132 * Backlight PWM may stop in the asserted state, causing backlight
133 * to stay fully on.
134 */
135 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
136 PWM1_GATING_DIS | PWM2_GATING_DIS);
137
138 /* WaDDIIOTimeout:glk */
139 if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
140 u32 val = I915_READ(CHICKEN_MISC_2);
141 val &= ~(GLK_CL0_PWR_DOWN |
142 GLK_CL1_PWR_DOWN |
143 GLK_CL2_PWR_DOWN);
144 I915_WRITE(CHICKEN_MISC_2, val);
145 }
146
147 /* Display WA #1133: WaFbcSkipSegments:glk */
148 val = I915_READ(ILK_DPFC_CHICKEN);
149 val &= ~GLK_SKIP_SEG_COUNT_MASK;
150 val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
151 I915_WRITE(ILK_DPFC_CHICKEN, val);
152 }
153
154 static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
155 {
156 u32 tmp;
157
158 tmp = I915_READ(CLKCFG);
159
160 switch (tmp & CLKCFG_FSB_MASK) {
161 case CLKCFG_FSB_533:
162 dev_priv->fsb_freq = 533; /* 133*4 */
163 break;
164 case CLKCFG_FSB_800:
165 dev_priv->fsb_freq = 800; /* 200*4 */
166 break;
167 case CLKCFG_FSB_667:
168 dev_priv->fsb_freq = 667; /* 167*4 */
169 break;
170 case CLKCFG_FSB_400:
171 dev_priv->fsb_freq = 400; /* 100*4 */
172 break;
173 }
174
175 switch (tmp & CLKCFG_MEM_MASK) {
176 case CLKCFG_MEM_533:
177 dev_priv->mem_freq = 533;
178 break;
179 case CLKCFG_MEM_667:
180 dev_priv->mem_freq = 667;
181 break;
182 case CLKCFG_MEM_800:
183 dev_priv->mem_freq = 800;
184 break;
185 }
186
187 /* detect pineview DDR3 setting */
188 tmp = I915_READ(CSHRDDR3CTL);
189 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
190 }
191
192 static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
193 {
194 u16 ddrpll, csipll;
195
196 ddrpll = I915_READ16(DDRMPLL1);
197 csipll = I915_READ16(CSIPLL0);
198
199 switch (ddrpll & 0xff) {
200 case 0xc:
201 dev_priv->mem_freq = 800;
202 break;
203 case 0x10:
204 dev_priv->mem_freq = 1066;
205 break;
206 case 0x14:
207 dev_priv->mem_freq = 1333;
208 break;
209 case 0x18:
210 dev_priv->mem_freq = 1600;
211 break;
212 default:
213 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
214 ddrpll & 0xff);
215 dev_priv->mem_freq = 0;
216 break;
217 }
218
219 dev_priv->ips.r_t = dev_priv->mem_freq;
220
221 switch (csipll & 0x3ff) {
222 case 0x00c:
223 dev_priv->fsb_freq = 3200;
224 break;
225 case 0x00e:
226 dev_priv->fsb_freq = 3733;
227 break;
228 case 0x010:
229 dev_priv->fsb_freq = 4266;
230 break;
231 case 0x012:
232 dev_priv->fsb_freq = 4800;
233 break;
234 case 0x014:
235 dev_priv->fsb_freq = 5333;
236 break;
237 case 0x016:
238 dev_priv->fsb_freq = 5866;
239 break;
240 case 0x018:
241 dev_priv->fsb_freq = 6400;
242 break;
243 default:
244 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
245 csipll & 0x3ff);
246 dev_priv->fsb_freq = 0;
247 break;
248 }
249
250 if (dev_priv->fsb_freq == 3200) {
251 dev_priv->ips.c_m = 0;
252 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
253 dev_priv->ips.c_m = 1;
254 } else {
255 dev_priv->ips.c_m = 2;
256 }
257 }
258
259 static const struct cxsr_latency cxsr_latency_table[] = {
260 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
261 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
262 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
263 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
264 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
265
266 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
267 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
268 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
269 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
270 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
271
272 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
273 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
274 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
275 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
276 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
277
278 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
279 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
280 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
281 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
282 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
283
284 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
285 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
286 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
287 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
288 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
289
290 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
291 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
292 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
293 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
294 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
295 };
296
297 static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
298 bool is_ddr3,
299 int fsb,
300 int mem)
301 {
302 const struct cxsr_latency *latency;
303 int i;
304
305 if (fsb == 0 || mem == 0)
306 return NULL;
307
308 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
309 latency = &cxsr_latency_table[i];
310 if (is_desktop == latency->is_desktop &&
311 is_ddr3 == latency->is_ddr3 &&
312 fsb == latency->fsb_freq && mem == latency->mem_freq)
313 return latency;
314 }
315
316 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
317
318 return NULL;
319 }
320
321 static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
322 {
323 u32 val;
324
325 mutex_lock(&dev_priv->rps.hw_lock);
326
327 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
328 if (enable)
329 val &= ~FORCE_DDR_HIGH_FREQ;
330 else
331 val |= FORCE_DDR_HIGH_FREQ;
332 val &= ~FORCE_DDR_LOW_FREQ;
333 val |= FORCE_DDR_FREQ_REQ_ACK;
334 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
335
336 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
337 FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
338 DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
339
340 mutex_unlock(&dev_priv->rps.hw_lock);
341 }
342
343 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
344 {
345 u32 val;
346
347 mutex_lock(&dev_priv->rps.hw_lock);
348
349 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
350 if (enable)
351 val |= DSP_MAXFIFO_PM5_ENABLE;
352 else
353 val &= ~DSP_MAXFIFO_PM5_ENABLE;
354 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
355
356 mutex_unlock(&dev_priv->rps.hw_lock);
357 }
358
359 #define FW_WM(value, plane) \
360 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
361
362 static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
363 {
364 bool was_enabled;
365 u32 val;
366
367 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
368 was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
369 I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
370 POSTING_READ(FW_BLC_SELF_VLV);
371 } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
372 was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
373 I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
374 POSTING_READ(FW_BLC_SELF);
375 } else if (IS_PINEVIEW(dev_priv)) {
376 val = I915_READ(DSPFW3);
377 was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
378 if (enable)
379 val |= PINEVIEW_SELF_REFRESH_EN;
380 else
381 val &= ~PINEVIEW_SELF_REFRESH_EN;
382 I915_WRITE(DSPFW3, val);
383 POSTING_READ(DSPFW3);
384 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
385 was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
386 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
387 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
388 I915_WRITE(FW_BLC_SELF, val);
389 POSTING_READ(FW_BLC_SELF);
390 } else if (IS_I915GM(dev_priv)) {
391 /*
392 * FIXME can't find a bit like this for 915G, and
393 * and yet it does have the related watermark in
394 * FW_BLC_SELF. What's going on?
395 */
396 was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
397 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
398 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
399 I915_WRITE(INSTPM, val);
400 POSTING_READ(INSTPM);
401 } else {
402 return false;
403 }
404
405 trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
406
407 DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
408 enableddisabled(enable),
409 enableddisabled(was_enabled));
410
411 return was_enabled;
412 }
413
414 /**
415 * intel_set_memory_cxsr - Configure CxSR state
416 * @dev_priv: i915 device
417 * @enable: Allow vs. disallow CxSR
418 *
419 * Allow or disallow the system to enter a special CxSR
420 * (C-state self refresh) state. What typically happens in CxSR mode
421 * is that several display FIFOs may get combined into a single larger
422 * FIFO for a particular plane (so called max FIFO mode) to allow the
423 * system to defer memory fetches longer, and the memory will enter
424 * self refresh.
425 *
426 * Note that enabling CxSR does not guarantee that the system enter
427 * this special mode, nor does it guarantee that the system stays
428 * in that mode once entered. So this just allows/disallows the system
429 * to autonomously utilize the CxSR mode. Other factors such as core
430 * C-states will affect when/if the system actually enters/exits the
431 * CxSR mode.
432 *
433 * Note that on VLV/CHV this actually only controls the max FIFO mode,
434 * and the system is free to enter/exit memory self refresh at any time
435 * even when the use of CxSR has been disallowed.
436 *
437 * While the system is actually in the CxSR/max FIFO mode, some plane
438 * control registers will not get latched on vblank. Thus in order to
439 * guarantee the system will respond to changes in the plane registers
440 * we must always disallow CxSR prior to making changes to those registers.
441 * Unfortunately the system will re-evaluate the CxSR conditions at
442 * frame start which happens after vblank start (which is when the plane
443 * registers would get latched), so we can't proceed with the plane update
444 * during the same frame where we disallowed CxSR.
445 *
446 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
447 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
448 * the hardware w.r.t. HPLL SR when writing to plane registers.
449 * Disallowing just CxSR is sufficient.
450 */
451 bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
452 {
453 bool ret;
454
455 mutex_lock(&dev_priv->wm.wm_mutex);
456 ret = _intel_set_memory_cxsr(dev_priv, enable);
457 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
458 dev_priv->wm.vlv.cxsr = enable;
459 else if (IS_G4X(dev_priv))
460 dev_priv->wm.g4x.cxsr = enable;
461 mutex_unlock(&dev_priv->wm.wm_mutex);
462
463 return ret;
464 }
465
466 /*
467 * Latency for FIFO fetches is dependent on several factors:
468 * - memory configuration (speed, channels)
469 * - chipset
470 * - current MCH state
471 * It can be fairly high in some situations, so here we assume a fairly
472 * pessimal value. It's a tradeoff between extra memory fetches (if we
473 * set this value too high, the FIFO will fetch frequently to stay full)
474 * and power consumption (set it too low to save power and we might see
475 * FIFO underruns and display "flicker").
476 *
477 * A value of 5us seems to be a good balance; safe for very low end
478 * platforms but not overly aggressive on lower latency configs.
479 */
480 static const int pessimal_latency_ns = 5000;
481
482 #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
483 ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
484
485 static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
486 {
487 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
488 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
489 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
490 enum pipe pipe = crtc->pipe;
491 int sprite0_start, sprite1_start;
492
493 switch (pipe) {
494 uint32_t dsparb, dsparb2, dsparb3;
495 case PIPE_A:
496 dsparb = I915_READ(DSPARB);
497 dsparb2 = I915_READ(DSPARB2);
498 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
499 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
500 break;
501 case PIPE_B:
502 dsparb = I915_READ(DSPARB);
503 dsparb2 = I915_READ(DSPARB2);
504 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
505 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
506 break;
507 case PIPE_C:
508 dsparb2 = I915_READ(DSPARB2);
509 dsparb3 = I915_READ(DSPARB3);
510 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
511 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
512 break;
513 default:
514 MISSING_CASE(pipe);
515 return;
516 }
517
518 fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
519 fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
520 fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
521 fifo_state->plane[PLANE_CURSOR] = 63;
522 }
523
524 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
525 {
526 uint32_t dsparb = I915_READ(DSPARB);
527 int size;
528
529 size = dsparb & 0x7f;
530 if (plane)
531 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
532
533 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
534 plane ? "B" : "A", size);
535
536 return size;
537 }
538
539 static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
540 {
541 uint32_t dsparb = I915_READ(DSPARB);
542 int size;
543
544 size = dsparb & 0x1ff;
545 if (plane)
546 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
547 size >>= 1; /* Convert to cachelines */
548
549 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
550 plane ? "B" : "A", size);
551
552 return size;
553 }
554
555 static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
556 {
557 uint32_t dsparb = I915_READ(DSPARB);
558 int size;
559
560 size = dsparb & 0x7f;
561 size >>= 2; /* Convert to cachelines */
562
563 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
564 plane ? "B" : "A",
565 size);
566
567 return size;
568 }
569
570 /* Pineview has different values for various configs */
571 static const struct intel_watermark_params pineview_display_wm = {
572 .fifo_size = PINEVIEW_DISPLAY_FIFO,
573 .max_wm = PINEVIEW_MAX_WM,
574 .default_wm = PINEVIEW_DFT_WM,
575 .guard_size = PINEVIEW_GUARD_WM,
576 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
577 };
578 static const struct intel_watermark_params pineview_display_hplloff_wm = {
579 .fifo_size = PINEVIEW_DISPLAY_FIFO,
580 .max_wm = PINEVIEW_MAX_WM,
581 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
582 .guard_size = PINEVIEW_GUARD_WM,
583 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
584 };
585 static const struct intel_watermark_params pineview_cursor_wm = {
586 .fifo_size = PINEVIEW_CURSOR_FIFO,
587 .max_wm = PINEVIEW_CURSOR_MAX_WM,
588 .default_wm = PINEVIEW_CURSOR_DFT_WM,
589 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
590 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
591 };
592 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
593 .fifo_size = PINEVIEW_CURSOR_FIFO,
594 .max_wm = PINEVIEW_CURSOR_MAX_WM,
595 .default_wm = PINEVIEW_CURSOR_DFT_WM,
596 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
597 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
598 };
599 static const struct intel_watermark_params i965_cursor_wm_info = {
600 .fifo_size = I965_CURSOR_FIFO,
601 .max_wm = I965_CURSOR_MAX_WM,
602 .default_wm = I965_CURSOR_DFT_WM,
603 .guard_size = 2,
604 .cacheline_size = I915_FIFO_LINE_SIZE,
605 };
606 static const struct intel_watermark_params i945_wm_info = {
607 .fifo_size = I945_FIFO_SIZE,
608 .max_wm = I915_MAX_WM,
609 .default_wm = 1,
610 .guard_size = 2,
611 .cacheline_size = I915_FIFO_LINE_SIZE,
612 };
613 static const struct intel_watermark_params i915_wm_info = {
614 .fifo_size = I915_FIFO_SIZE,
615 .max_wm = I915_MAX_WM,
616 .default_wm = 1,
617 .guard_size = 2,
618 .cacheline_size = I915_FIFO_LINE_SIZE,
619 };
620 static const struct intel_watermark_params i830_a_wm_info = {
621 .fifo_size = I855GM_FIFO_SIZE,
622 .max_wm = I915_MAX_WM,
623 .default_wm = 1,
624 .guard_size = 2,
625 .cacheline_size = I830_FIFO_LINE_SIZE,
626 };
627 static const struct intel_watermark_params i830_bc_wm_info = {
628 .fifo_size = I855GM_FIFO_SIZE,
629 .max_wm = I915_MAX_WM/2,
630 .default_wm = 1,
631 .guard_size = 2,
632 .cacheline_size = I830_FIFO_LINE_SIZE,
633 };
634 static const struct intel_watermark_params i845_wm_info = {
635 .fifo_size = I830_FIFO_SIZE,
636 .max_wm = I915_MAX_WM,
637 .default_wm = 1,
638 .guard_size = 2,
639 .cacheline_size = I830_FIFO_LINE_SIZE,
640 };
641
642 /**
643 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
644 * @pixel_rate: Pipe pixel rate in kHz
645 * @cpp: Plane bytes per pixel
646 * @latency: Memory wakeup latency in 0.1us units
647 *
648 * Compute the watermark using the method 1 or "small buffer"
649 * formula. The caller may additonally add extra cachelines
650 * to account for TLB misses and clock crossings.
651 *
652 * This method is concerned with the short term drain rate
653 * of the FIFO, ie. it does not account for blanking periods
654 * which would effectively reduce the average drain rate across
655 * a longer period. The name "small" refers to the fact the
656 * FIFO is relatively small compared to the amount of data
657 * fetched.
658 *
659 * The FIFO level vs. time graph might look something like:
660 *
661 * |\ |\
662 * | \ | \
663 * __---__---__ (- plane active, _ blanking)
664 * -> time
665 *
666 * or perhaps like this:
667 *
668 * |\|\ |\|\
669 * __----__----__ (- plane active, _ blanking)
670 * -> time
671 *
672 * Returns:
673 * The watermark in bytes
674 */
675 static unsigned int intel_wm_method1(unsigned int pixel_rate,
676 unsigned int cpp,
677 unsigned int latency)
678 {
679 uint64_t ret;
680
681 ret = (uint64_t) pixel_rate * cpp * latency;
682 ret = DIV_ROUND_UP_ULL(ret, 10000);
683
684 return ret;
685 }
686
687 /**
688 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
689 * @pixel_rate: Pipe pixel rate in kHz
690 * @htotal: Pipe horizontal total
691 * @width: Plane width in pixels
692 * @cpp: Plane bytes per pixel
693 * @latency: Memory wakeup latency in 0.1us units
694 *
695 * Compute the watermark using the method 2 or "large buffer"
696 * formula. The caller may additonally add extra cachelines
697 * to account for TLB misses and clock crossings.
698 *
699 * This method is concerned with the long term drain rate
700 * of the FIFO, ie. it does account for blanking periods
701 * which effectively reduce the average drain rate across
702 * a longer period. The name "large" refers to the fact the
703 * FIFO is relatively large compared to the amount of data
704 * fetched.
705 *
706 * The FIFO level vs. time graph might look something like:
707 *
708 * |\___ |\___
709 * | \___ | \___
710 * | \ | \
711 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
712 * -> time
713 *
714 * Returns:
715 * The watermark in bytes
716 */
717 static unsigned int intel_wm_method2(unsigned int pixel_rate,
718 unsigned int htotal,
719 unsigned int width,
720 unsigned int cpp,
721 unsigned int latency)
722 {
723 unsigned int ret;
724
725 /*
726 * FIXME remove once all users are computing
727 * watermarks in the correct place.
728 */
729 if (WARN_ON_ONCE(htotal == 0))
730 htotal = 1;
731
732 ret = (latency * pixel_rate) / (htotal * 10000);
733 ret = (ret + 1) * width * cpp;
734
735 return ret;
736 }
737
738 /**
739 * intel_calculate_wm - calculate watermark level
740 * @pixel_rate: pixel clock
741 * @wm: chip FIFO params
742 * @cpp: bytes per pixel
743 * @latency_ns: memory latency for the platform
744 *
745 * Calculate the watermark level (the level at which the display plane will
746 * start fetching from memory again). Each chip has a different display
747 * FIFO size and allocation, so the caller needs to figure that out and pass
748 * in the correct intel_watermark_params structure.
749 *
750 * As the pixel clock runs, the FIFO will be drained at a rate that depends
751 * on the pixel size. When it reaches the watermark level, it'll start
752 * fetching FIFO line sized based chunks from memory until the FIFO fills
753 * past the watermark point. If the FIFO drains completely, a FIFO underrun
754 * will occur, and a display engine hang could result.
755 */
756 static unsigned int intel_calculate_wm(int pixel_rate,
757 const struct intel_watermark_params *wm,
758 int fifo_size, int cpp,
759 unsigned int latency_ns)
760 {
761 int entries, wm_size;
762
763 /*
764 * Note: we need to make sure we don't overflow for various clock &
765 * latency values.
766 * clocks go from a few thousand to several hundred thousand.
767 * latency is usually a few thousand
768 */
769 entries = intel_wm_method1(pixel_rate, cpp,
770 latency_ns / 100);
771 entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
772 wm->guard_size;
773 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
774
775 wm_size = fifo_size - entries;
776 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
777
778 /* Don't promote wm_size to unsigned... */
779 if (wm_size > wm->max_wm)
780 wm_size = wm->max_wm;
781 if (wm_size <= 0)
782 wm_size = wm->default_wm;
783
784 /*
785 * Bspec seems to indicate that the value shouldn't be lower than
786 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
787 * Lets go for 8 which is the burst size since certain platforms
788 * already use a hardcoded 8 (which is what the spec says should be
789 * done).
790 */
791 if (wm_size <= 8)
792 wm_size = 8;
793
794 return wm_size;
795 }
796
797 static bool is_disabling(int old, int new, int threshold)
798 {
799 return old >= threshold && new < threshold;
800 }
801
802 static bool is_enabling(int old, int new, int threshold)
803 {
804 return old < threshold && new >= threshold;
805 }
806
807 static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
808 {
809 return dev_priv->wm.max_level + 1;
810 }
811
812 static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
813 const struct intel_plane_state *plane_state)
814 {
815 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
816
817 /* FIXME check the 'enable' instead */
818 if (!crtc_state->base.active)
819 return false;
820
821 /*
822 * Treat cursor with fb as always visible since cursor updates
823 * can happen faster than the vrefresh rate, and the current
824 * watermark code doesn't handle that correctly. Cursor updates
825 * which set/clear the fb or change the cursor size are going
826 * to get throttled by intel_legacy_cursor_update() to work
827 * around this problem with the watermark code.
828 */
829 if (plane->id == PLANE_CURSOR)
830 return plane_state->base.fb != NULL;
831 else
832 return plane_state->base.visible;
833 }
834
835 static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
836 {
837 struct intel_crtc *crtc, *enabled = NULL;
838
839 for_each_intel_crtc(&dev_priv->drm, crtc) {
840 if (intel_crtc_active(crtc)) {
841 if (enabled)
842 return NULL;
843 enabled = crtc;
844 }
845 }
846
847 return enabled;
848 }
849
850 static void pineview_update_wm(struct intel_crtc *unused_crtc)
851 {
852 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
853 struct intel_crtc *crtc;
854 const struct cxsr_latency *latency;
855 u32 reg;
856 unsigned int wm;
857
858 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
859 dev_priv->is_ddr3,
860 dev_priv->fsb_freq,
861 dev_priv->mem_freq);
862 if (!latency) {
863 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
864 intel_set_memory_cxsr(dev_priv, false);
865 return;
866 }
867
868 crtc = single_enabled_crtc(dev_priv);
869 if (crtc) {
870 const struct drm_display_mode *adjusted_mode =
871 &crtc->config->base.adjusted_mode;
872 const struct drm_framebuffer *fb =
873 crtc->base.primary->state->fb;
874 int cpp = fb->format->cpp[0];
875 int clock = adjusted_mode->crtc_clock;
876
877 /* Display SR */
878 wm = intel_calculate_wm(clock, &pineview_display_wm,
879 pineview_display_wm.fifo_size,
880 cpp, latency->display_sr);
881 reg = I915_READ(DSPFW1);
882 reg &= ~DSPFW_SR_MASK;
883 reg |= FW_WM(wm, SR);
884 I915_WRITE(DSPFW1, reg);
885 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
886
887 /* cursor SR */
888 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
889 pineview_display_wm.fifo_size,
890 4, latency->cursor_sr);
891 reg = I915_READ(DSPFW3);
892 reg &= ~DSPFW_CURSOR_SR_MASK;
893 reg |= FW_WM(wm, CURSOR_SR);
894 I915_WRITE(DSPFW3, reg);
895
896 /* Display HPLL off SR */
897 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
898 pineview_display_hplloff_wm.fifo_size,
899 cpp, latency->display_hpll_disable);
900 reg = I915_READ(DSPFW3);
901 reg &= ~DSPFW_HPLL_SR_MASK;
902 reg |= FW_WM(wm, HPLL_SR);
903 I915_WRITE(DSPFW3, reg);
904
905 /* cursor HPLL off SR */
906 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
907 pineview_display_hplloff_wm.fifo_size,
908 4, latency->cursor_hpll_disable);
909 reg = I915_READ(DSPFW3);
910 reg &= ~DSPFW_HPLL_CURSOR_MASK;
911 reg |= FW_WM(wm, HPLL_CURSOR);
912 I915_WRITE(DSPFW3, reg);
913 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
914
915 intel_set_memory_cxsr(dev_priv, true);
916 } else {
917 intel_set_memory_cxsr(dev_priv, false);
918 }
919 }
920
921 /*
922 * Documentation says:
923 * "If the line size is small, the TLB fetches can get in the way of the
924 * data fetches, causing some lag in the pixel data return which is not
925 * accounted for in the above formulas. The following adjustment only
926 * needs to be applied if eight whole lines fit in the buffer at once.
927 * The WM is adjusted upwards by the difference between the FIFO size
928 * and the size of 8 whole lines. This adjustment is always performed
929 * in the actual pixel depth regardless of whether FBC is enabled or not."
930 */
931 static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
932 {
933 int tlb_miss = fifo_size * 64 - width * cpp * 8;
934
935 return max(0, tlb_miss);
936 }
937
938 static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
939 const struct g4x_wm_values *wm)
940 {
941 enum pipe pipe;
942
943 for_each_pipe(dev_priv, pipe)
944 trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
945
946 I915_WRITE(DSPFW1,
947 FW_WM(wm->sr.plane, SR) |
948 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
949 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
950 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
951 I915_WRITE(DSPFW2,
952 (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
953 FW_WM(wm->sr.fbc, FBC_SR) |
954 FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
955 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
956 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
957 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
958 I915_WRITE(DSPFW3,
959 (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
960 FW_WM(wm->sr.cursor, CURSOR_SR) |
961 FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
962 FW_WM(wm->hpll.plane, HPLL_SR));
963
964 POSTING_READ(DSPFW1);
965 }
966
967 #define FW_WM_VLV(value, plane) \
968 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
969
970 static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
971 const struct vlv_wm_values *wm)
972 {
973 enum pipe pipe;
974
975 for_each_pipe(dev_priv, pipe) {
976 trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
977
978 I915_WRITE(VLV_DDL(pipe),
979 (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
980 (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
981 (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
982 (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
983 }
984
985 /*
986 * Zero the (unused) WM1 watermarks, and also clear all the
987 * high order bits so that there are no out of bounds values
988 * present in the registers during the reprogramming.
989 */
990 I915_WRITE(DSPHOWM, 0);
991 I915_WRITE(DSPHOWM1, 0);
992 I915_WRITE(DSPFW4, 0);
993 I915_WRITE(DSPFW5, 0);
994 I915_WRITE(DSPFW6, 0);
995
996 I915_WRITE(DSPFW1,
997 FW_WM(wm->sr.plane, SR) |
998 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
999 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
1000 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
1001 I915_WRITE(DSPFW2,
1002 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
1003 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
1004 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
1005 I915_WRITE(DSPFW3,
1006 FW_WM(wm->sr.cursor, CURSOR_SR));
1007
1008 if (IS_CHERRYVIEW(dev_priv)) {
1009 I915_WRITE(DSPFW7_CHV,
1010 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1011 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1012 I915_WRITE(DSPFW8_CHV,
1013 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
1014 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
1015 I915_WRITE(DSPFW9_CHV,
1016 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
1017 FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
1018 I915_WRITE(DSPHOWM,
1019 FW_WM(wm->sr.plane >> 9, SR_HI) |
1020 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
1021 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
1022 FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
1023 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1024 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1025 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1026 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1027 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1028 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1029 } else {
1030 I915_WRITE(DSPFW7,
1031 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1032 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1033 I915_WRITE(DSPHOWM,
1034 FW_WM(wm->sr.plane >> 9, SR_HI) |
1035 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1036 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1037 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1038 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1039 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1040 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1041 }
1042
1043 POSTING_READ(DSPFW1);
1044 }
1045
1046 #undef FW_WM_VLV
1047
1048 static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
1049 {
1050 /* all latencies in usec */
1051 dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
1052 dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
1053 dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
1054
1055 dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
1056 }
1057
1058 static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
1059 {
1060 /*
1061 * DSPCNTR[13] supposedly controls whether the
1062 * primary plane can use the FIFO space otherwise
1063 * reserved for the sprite plane. It's not 100% clear
1064 * what the actual FIFO size is, but it looks like we
1065 * can happily set both primary and sprite watermarks
1066 * up to 127 cachelines. So that would seem to mean
1067 * that either DSPCNTR[13] doesn't do anything, or that
1068 * the total FIFO is >= 256 cachelines in size. Either
1069 * way, we don't seem to have to worry about this
1070 * repartitioning as the maximum watermark value the
1071 * register can hold for each plane is lower than the
1072 * minimum FIFO size.
1073 */
1074 switch (plane_id) {
1075 case PLANE_CURSOR:
1076 return 63;
1077 case PLANE_PRIMARY:
1078 return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
1079 case PLANE_SPRITE0:
1080 return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
1081 default:
1082 MISSING_CASE(plane_id);
1083 return 0;
1084 }
1085 }
1086
1087 static int g4x_fbc_fifo_size(int level)
1088 {
1089 switch (level) {
1090 case G4X_WM_LEVEL_SR:
1091 return 7;
1092 case G4X_WM_LEVEL_HPLL:
1093 return 15;
1094 default:
1095 MISSING_CASE(level);
1096 return 0;
1097 }
1098 }
1099
1100 static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
1101 const struct intel_plane_state *plane_state,
1102 int level)
1103 {
1104 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1105 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1106 const struct drm_display_mode *adjusted_mode =
1107 &crtc_state->base.adjusted_mode;
1108 int clock, htotal, cpp, width, wm;
1109 int latency = dev_priv->wm.pri_latency[level] * 10;
1110
1111 if (latency == 0)
1112 return USHRT_MAX;
1113
1114 if (!intel_wm_plane_visible(crtc_state, plane_state))
1115 return 0;
1116
1117 /*
1118 * Not 100% sure which way ELK should go here as the
1119 * spec only says CL/CTG should assume 32bpp and BW
1120 * doesn't need to. But as these things followed the
1121 * mobile vs. desktop lines on gen3 as well, let's
1122 * assume ELK doesn't need this.
1123 *
1124 * The spec also fails to list such a restriction for
1125 * the HPLL watermark, which seems a little strange.
1126 * Let's use 32bpp for the HPLL watermark as well.
1127 */
1128 if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
1129 level != G4X_WM_LEVEL_NORMAL)
1130 cpp = 4;
1131 else
1132 cpp = plane_state->base.fb->format->cpp[0];
1133
1134 clock = adjusted_mode->crtc_clock;
1135 htotal = adjusted_mode->crtc_htotal;
1136
1137 if (plane->id == PLANE_CURSOR)
1138 width = plane_state->base.crtc_w;
1139 else
1140 width = drm_rect_width(&plane_state->base.dst);
1141
1142 if (plane->id == PLANE_CURSOR) {
1143 wm = intel_wm_method2(clock, htotal, width, cpp, latency);
1144 } else if (plane->id == PLANE_PRIMARY &&
1145 level == G4X_WM_LEVEL_NORMAL) {
1146 wm = intel_wm_method1(clock, cpp, latency);
1147 } else {
1148 int small, large;
1149
1150 small = intel_wm_method1(clock, cpp, latency);
1151 large = intel_wm_method2(clock, htotal, width, cpp, latency);
1152
1153 wm = min(small, large);
1154 }
1155
1156 wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
1157 width, cpp);
1158
1159 wm = DIV_ROUND_UP(wm, 64) + 2;
1160
1161 return min_t(int, wm, USHRT_MAX);
1162 }
1163
1164 static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1165 int level, enum plane_id plane_id, u16 value)
1166 {
1167 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1168 bool dirty = false;
1169
1170 for (; level < intel_wm_num_levels(dev_priv); level++) {
1171 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1172
1173 dirty |= raw->plane[plane_id] != value;
1174 raw->plane[plane_id] = value;
1175 }
1176
1177 return dirty;
1178 }
1179
1180 static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
1181 int level, u16 value)
1182 {
1183 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1184 bool dirty = false;
1185
1186 /* NORMAL level doesn't have an FBC watermark */
1187 level = max(level, G4X_WM_LEVEL_SR);
1188
1189 for (; level < intel_wm_num_levels(dev_priv); level++) {
1190 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1191
1192 dirty |= raw->fbc != value;
1193 raw->fbc = value;
1194 }
1195
1196 return dirty;
1197 }
1198
1199 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
1200 const struct intel_plane_state *pstate,
1201 uint32_t pri_val);
1202
1203 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1204 const struct intel_plane_state *plane_state)
1205 {
1206 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1207 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1208 enum plane_id plane_id = plane->id;
1209 bool dirty = false;
1210 int level;
1211
1212 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1213 dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1214 if (plane_id == PLANE_PRIMARY)
1215 dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
1216 goto out;
1217 }
1218
1219 for (level = 0; level < num_levels; level++) {
1220 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1221 int wm, max_wm;
1222
1223 wm = g4x_compute_wm(crtc_state, plane_state, level);
1224 max_wm = g4x_plane_fifo_size(plane_id, level);
1225
1226 if (wm > max_wm)
1227 break;
1228
1229 dirty |= raw->plane[plane_id] != wm;
1230 raw->plane[plane_id] = wm;
1231
1232 if (plane_id != PLANE_PRIMARY ||
1233 level == G4X_WM_LEVEL_NORMAL)
1234 continue;
1235
1236 wm = ilk_compute_fbc_wm(crtc_state, plane_state,
1237 raw->plane[plane_id]);
1238 max_wm = g4x_fbc_fifo_size(level);
1239
1240 /*
1241 * FBC wm is not mandatory as we
1242 * can always just disable its use.
1243 */
1244 if (wm > max_wm)
1245 wm = USHRT_MAX;
1246
1247 dirty |= raw->fbc != wm;
1248 raw->fbc = wm;
1249 }
1250
1251 /* mark watermarks as invalid */
1252 dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1253
1254 if (plane_id == PLANE_PRIMARY)
1255 dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
1256
1257 out:
1258 if (dirty) {
1259 DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
1260 plane->base.name,
1261 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
1262 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
1263 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
1264
1265 if (plane_id == PLANE_PRIMARY)
1266 DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
1267 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
1268 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
1269 }
1270
1271 return dirty;
1272 }
1273
1274 static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1275 enum plane_id plane_id, int level)
1276 {
1277 const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1278
1279 return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
1280 }
1281
1282 static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
1283 int level)
1284 {
1285 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1286
1287 if (level > dev_priv->wm.max_level)
1288 return false;
1289
1290 return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1291 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1292 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1293 }
1294
1295 /* mark all levels starting from 'level' as invalid */
1296 static void g4x_invalidate_wms(struct intel_crtc *crtc,
1297 struct g4x_wm_state *wm_state, int level)
1298 {
1299 if (level <= G4X_WM_LEVEL_NORMAL) {
1300 enum plane_id plane_id;
1301
1302 for_each_plane_id_on_crtc(crtc, plane_id)
1303 wm_state->wm.plane[plane_id] = USHRT_MAX;
1304 }
1305
1306 if (level <= G4X_WM_LEVEL_SR) {
1307 wm_state->cxsr = false;
1308 wm_state->sr.cursor = USHRT_MAX;
1309 wm_state->sr.plane = USHRT_MAX;
1310 wm_state->sr.fbc = USHRT_MAX;
1311 }
1312
1313 if (level <= G4X_WM_LEVEL_HPLL) {
1314 wm_state->hpll_en = false;
1315 wm_state->hpll.cursor = USHRT_MAX;
1316 wm_state->hpll.plane = USHRT_MAX;
1317 wm_state->hpll.fbc = USHRT_MAX;
1318 }
1319 }
1320
1321 static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1322 {
1323 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1324 struct intel_atomic_state *state =
1325 to_intel_atomic_state(crtc_state->base.state);
1326 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1327 int num_active_planes = hweight32(crtc_state->active_planes &
1328 ~BIT(PLANE_CURSOR));
1329 const struct g4x_pipe_wm *raw;
1330 const struct intel_plane_state *old_plane_state;
1331 const struct intel_plane_state *new_plane_state;
1332 struct intel_plane *plane;
1333 enum plane_id plane_id;
1334 int i, level;
1335 unsigned int dirty = 0;
1336
1337 for_each_oldnew_intel_plane_in_state(state, plane,
1338 old_plane_state,
1339 new_plane_state, i) {
1340 if (new_plane_state->base.crtc != &crtc->base &&
1341 old_plane_state->base.crtc != &crtc->base)
1342 continue;
1343
1344 if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
1345 dirty |= BIT(plane->id);
1346 }
1347
1348 if (!dirty)
1349 return 0;
1350
1351 level = G4X_WM_LEVEL_NORMAL;
1352 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1353 goto out;
1354
1355 raw = &crtc_state->wm.g4x.raw[level];
1356 for_each_plane_id_on_crtc(crtc, plane_id)
1357 wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1358
1359 level = G4X_WM_LEVEL_SR;
1360
1361 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1362 goto out;
1363
1364 raw = &crtc_state->wm.g4x.raw[level];
1365 wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
1366 wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
1367 wm_state->sr.fbc = raw->fbc;
1368
1369 wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);
1370
1371 level = G4X_WM_LEVEL_HPLL;
1372
1373 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1374 goto out;
1375
1376 raw = &crtc_state->wm.g4x.raw[level];
1377 wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
1378 wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
1379 wm_state->hpll.fbc = raw->fbc;
1380
1381 wm_state->hpll_en = wm_state->cxsr;
1382
1383 level++;
1384
1385 out:
1386 if (level == G4X_WM_LEVEL_NORMAL)
1387 return -EINVAL;
1388
1389 /* invalidate the higher levels */
1390 g4x_invalidate_wms(crtc, wm_state, level);
1391
1392 /*
1393 * Determine if the FBC watermark(s) can be used. IF
1394 * this isn't the case we prefer to disable the FBC
1395 ( watermark(s) rather than disable the SR/HPLL
1396 * level(s) entirely.
1397 */
1398 wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;
1399
1400 if (level >= G4X_WM_LEVEL_SR &&
1401 wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1402 wm_state->fbc_en = false;
1403 else if (level >= G4X_WM_LEVEL_HPLL &&
1404 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1405 wm_state->fbc_en = false;
1406
1407 return 0;
1408 }
1409
1410 static int g4x_compute_intermediate_wm(struct drm_device *dev,
1411 struct intel_crtc *crtc,
1412 struct intel_crtc_state *crtc_state)
1413 {
1414 struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
1415 const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
1416 const struct g4x_wm_state *active = &crtc->wm.active.g4x;
1417 enum plane_id plane_id;
1418
1419 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1420 !crtc_state->disable_cxsr;
1421 intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1422 !crtc_state->disable_cxsr;
1423 intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1424
1425 for_each_plane_id_on_crtc(crtc, plane_id) {
1426 intermediate->wm.plane[plane_id] =
1427 max(optimal->wm.plane[plane_id],
1428 active->wm.plane[plane_id]);
1429
1430 WARN_ON(intermediate->wm.plane[plane_id] >
1431 g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1432 }
1433
1434 intermediate->sr.plane = max(optimal->sr.plane,
1435 active->sr.plane);
1436 intermediate->sr.cursor = max(optimal->sr.cursor,
1437 active->sr.cursor);
1438 intermediate->sr.fbc = max(optimal->sr.fbc,
1439 active->sr.fbc);
1440
1441 intermediate->hpll.plane = max(optimal->hpll.plane,
1442 active->hpll.plane);
1443 intermediate->hpll.cursor = max(optimal->hpll.cursor,
1444 active->hpll.cursor);
1445 intermediate->hpll.fbc = max(optimal->hpll.fbc,
1446 active->hpll.fbc);
1447
1448 WARN_ON((intermediate->sr.plane >
1449 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1450 intermediate->sr.cursor >
1451 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1452 intermediate->cxsr);
1453 WARN_ON((intermediate->sr.plane >
1454 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1455 intermediate->sr.cursor >
1456 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1457 intermediate->hpll_en);
1458
1459 WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1460 intermediate->fbc_en && intermediate->cxsr);
1461 WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1462 intermediate->fbc_en && intermediate->hpll_en);
1463
1464 /*
1465 * If our intermediate WM are identical to the final WM, then we can
1466 * omit the post-vblank programming; only update if it's different.
1467 */
1468 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1469 crtc_state->wm.need_postvbl_update = true;
1470
1471 return 0;
1472 }
1473
1474 static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1475 struct g4x_wm_values *wm)
1476 {
1477 struct intel_crtc *crtc;
1478 int num_active_crtcs = 0;
1479
1480 wm->cxsr = true;
1481 wm->hpll_en = true;
1482 wm->fbc_en = true;
1483
1484 for_each_intel_crtc(&dev_priv->drm, crtc) {
1485 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1486
1487 if (!crtc->active)
1488 continue;
1489
1490 if (!wm_state->cxsr)
1491 wm->cxsr = false;
1492 if (!wm_state->hpll_en)
1493 wm->hpll_en = false;
1494 if (!wm_state->fbc_en)
1495 wm->fbc_en = false;
1496
1497 num_active_crtcs++;
1498 }
1499
1500 if (num_active_crtcs != 1) {
1501 wm->cxsr = false;
1502 wm->hpll_en = false;
1503 wm->fbc_en = false;
1504 }
1505
1506 for_each_intel_crtc(&dev_priv->drm, crtc) {
1507 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1508 enum pipe pipe = crtc->pipe;
1509
1510 wm->pipe[pipe] = wm_state->wm;
1511 if (crtc->active && wm->cxsr)
1512 wm->sr = wm_state->sr;
1513 if (crtc->active && wm->hpll_en)
1514 wm->hpll = wm_state->hpll;
1515 }
1516 }
1517
1518 static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1519 {
1520 struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1521 struct g4x_wm_values new_wm = {};
1522
1523 g4x_merge_wm(dev_priv, &new_wm);
1524
1525 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1526 return;
1527
1528 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1529 _intel_set_memory_cxsr(dev_priv, false);
1530
1531 g4x_write_wm_values(dev_priv, &new_wm);
1532
1533 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1534 _intel_set_memory_cxsr(dev_priv, true);
1535
1536 *old_wm = new_wm;
1537 }
1538
1539 static void g4x_initial_watermarks(struct intel_atomic_state *state,
1540 struct intel_crtc_state *crtc_state)
1541 {
1542 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1543 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1544
1545 mutex_lock(&dev_priv->wm.wm_mutex);
1546 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1547 g4x_program_watermarks(dev_priv);
1548 mutex_unlock(&dev_priv->wm.wm_mutex);
1549 }
1550
1551 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1552 struct intel_crtc_state *crtc_state)
1553 {
1554 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1555 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
1556
1557 if (!crtc_state->wm.need_postvbl_update)
1558 return;
1559
1560 mutex_lock(&dev_priv->wm.wm_mutex);
1561 intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1562 g4x_program_watermarks(dev_priv);
1563 mutex_unlock(&dev_priv->wm.wm_mutex);
1564 }
1565
1566 /* latency must be in 0.1us units. */
1567 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1568 unsigned int htotal,
1569 unsigned int width,
1570 unsigned int cpp,
1571 unsigned int latency)
1572 {
1573 unsigned int ret;
1574
1575 ret = intel_wm_method2(pixel_rate, htotal,
1576 width, cpp, latency);
1577 ret = DIV_ROUND_UP(ret, 64);
1578
1579 return ret;
1580 }
1581
1582 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1583 {
1584 /* all latencies in usec */
1585 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1586
1587 dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1588
1589 if (IS_CHERRYVIEW(dev_priv)) {
1590 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1591 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1592
1593 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1594 }
1595 }
1596
1597 static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1598 const struct intel_plane_state *plane_state,
1599 int level)
1600 {
1601 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1602 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1603 const struct drm_display_mode *adjusted_mode =
1604 &crtc_state->base.adjusted_mode;
1605 int clock, htotal, cpp, width, wm;
1606
1607 if (dev_priv->wm.pri_latency[level] == 0)
1608 return USHRT_MAX;
1609
1610 if (!intel_wm_plane_visible(crtc_state, plane_state))
1611 return 0;
1612
1613 cpp = plane_state->base.fb->format->cpp[0];
1614 clock = adjusted_mode->crtc_clock;
1615 htotal = adjusted_mode->crtc_htotal;
1616 width = crtc_state->pipe_src_w;
1617
1618 if (plane->id == PLANE_CURSOR) {
1619 /*
1620 * FIXME the formula gives values that are
1621 * too big for the cursor FIFO, and hence we
1622 * would never be able to use cursors. For
1623 * now just hardcode the watermark.
1624 */
1625 wm = 63;
1626 } else {
1627 wm = vlv_wm_method2(clock, htotal, width, cpp,
1628 dev_priv->wm.pri_latency[level] * 10);
1629 }
1630
1631 return min_t(int, wm, USHRT_MAX);
1632 }
1633
1634 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1635 {
1636 return (active_planes & (BIT(PLANE_SPRITE0) |
1637 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1638 }
1639
1640 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1641 {
1642 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1643 const struct g4x_pipe_wm *raw =
1644 &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1645 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1646 unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1647 int num_active_planes = hweight32(active_planes);
1648 const int fifo_size = 511;
1649 int fifo_extra, fifo_left = fifo_size;
1650 int sprite0_fifo_extra = 0;
1651 unsigned int total_rate;
1652 enum plane_id plane_id;
1653
1654 /*
1655 * When enabling sprite0 after sprite1 has already been enabled
1656 * we tend to get an underrun unless sprite0 already has some
1657 * FIFO space allcoated. Hence we always allocate at least one
1658 * cacheline for sprite0 whenever sprite1 is enabled.
1659 *
1660 * All other plane enable sequences appear immune to this problem.
1661 */
1662 if (vlv_need_sprite0_fifo_workaround(active_planes))
1663 sprite0_fifo_extra = 1;
1664
1665 total_rate = raw->plane[PLANE_PRIMARY] +
1666 raw->plane[PLANE_SPRITE0] +
1667 raw->plane[PLANE_SPRITE1] +
1668 sprite0_fifo_extra;
1669
1670 if (total_rate > fifo_size)
1671 return -EINVAL;
1672
1673 if (total_rate == 0)
1674 total_rate = 1;
1675
1676 for_each_plane_id_on_crtc(crtc, plane_id) {
1677 unsigned int rate;
1678
1679 if ((active_planes & BIT(plane_id)) == 0) {
1680 fifo_state->plane[plane_id] = 0;
1681 continue;
1682 }
1683
1684 rate = raw->plane[plane_id];
1685 fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1686 fifo_left -= fifo_state->plane[plane_id];
1687 }
1688
1689 fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1690 fifo_left -= sprite0_fifo_extra;
1691
1692 fifo_state->plane[PLANE_CURSOR] = 63;
1693
1694 fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1695
1696 /* spread the remainder evenly */
1697 for_each_plane_id_on_crtc(crtc, plane_id) {
1698 int plane_extra;
1699
1700 if (fifo_left == 0)
1701 break;
1702
1703 if ((active_planes & BIT(plane_id)) == 0)
1704 continue;
1705
1706 plane_extra = min(fifo_extra, fifo_left);
1707 fifo_state->plane[plane_id] += plane_extra;
1708 fifo_left -= plane_extra;
1709 }
1710
1711 WARN_ON(active_planes != 0 && fifo_left != 0);
1712
1713 /* give it all to the first plane if none are active */
1714 if (active_planes == 0) {
1715 WARN_ON(fifo_left != fifo_size);
1716 fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1717 }
1718
1719 return 0;
1720 }
1721
1722 /* mark all levels starting from 'level' as invalid */
1723 static void vlv_invalidate_wms(struct intel_crtc *crtc,
1724 struct vlv_wm_state *wm_state, int level)
1725 {
1726 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1727
1728 for (; level < intel_wm_num_levels(dev_priv); level++) {
1729 enum plane_id plane_id;
1730
1731 for_each_plane_id_on_crtc(crtc, plane_id)
1732 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1733
1734 wm_state->sr[level].cursor = USHRT_MAX;
1735 wm_state->sr[level].plane = USHRT_MAX;
1736 }
1737 }
1738
1739 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1740 {
1741 if (wm > fifo_size)
1742 return USHRT_MAX;
1743 else
1744 return fifo_size - wm;
1745 }
1746
1747 /*
1748 * Starting from 'level' set all higher
1749 * levels to 'value' in the "raw" watermarks.
1750 */
1751 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1752 int level, enum plane_id plane_id, u16 value)
1753 {
1754 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1755 int num_levels = intel_wm_num_levels(dev_priv);
1756 bool dirty = false;
1757
1758 for (; level < num_levels; level++) {
1759 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1760
1761 dirty |= raw->plane[plane_id] != value;
1762 raw->plane[plane_id] = value;
1763 }
1764
1765 return dirty;
1766 }
1767
1768 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1769 const struct intel_plane_state *plane_state)
1770 {
1771 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1772 enum plane_id plane_id = plane->id;
1773 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1774 int level;
1775 bool dirty = false;
1776
1777 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1778 dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1779 goto out;
1780 }
1781
1782 for (level = 0; level < num_levels; level++) {
1783 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1784 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1785 int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1786
1787 if (wm > max_wm)
1788 break;
1789
1790 dirty |= raw->plane[plane_id] != wm;
1791 raw->plane[plane_id] = wm;
1792 }
1793
1794 /* mark all higher levels as invalid */
1795 dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1796
1797 out:
1798 if (dirty)
1799 DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1800 plane->base.name,
1801 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1802 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1803 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1804
1805 return dirty;
1806 }
1807
1808 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1809 enum plane_id plane_id, int level)
1810 {
1811 const struct g4x_pipe_wm *raw =
1812 &crtc_state->wm.vlv.raw[level];
1813 const struct vlv_fifo_state *fifo_state =
1814 &crtc_state->wm.vlv.fifo_state;
1815
1816 return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1817 }
1818
1819 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1820 {
1821 return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1822 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1823 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1824 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1825 }
1826
1827 static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1828 {
1829 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1830 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1831 struct intel_atomic_state *state =
1832 to_intel_atomic_state(crtc_state->base.state);
1833 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1834 const struct vlv_fifo_state *fifo_state =
1835 &crtc_state->wm.vlv.fifo_state;
1836 int num_active_planes = hweight32(crtc_state->active_planes &
1837 ~BIT(PLANE_CURSOR));
1838 bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
1839 const struct intel_plane_state *old_plane_state;
1840 const struct intel_plane_state *new_plane_state;
1841 struct intel_plane *plane;
1842 enum plane_id plane_id;
1843 int level, ret, i;
1844 unsigned int dirty = 0;
1845
1846 for_each_oldnew_intel_plane_in_state(state, plane,
1847 old_plane_state,
1848 new_plane_state, i) {
1849 if (new_plane_state->base.crtc != &crtc->base &&
1850 old_plane_state->base.crtc != &crtc->base)
1851 continue;
1852
1853 if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1854 dirty |= BIT(plane->id);
1855 }
1856
1857 /*
1858 * DSPARB registers may have been reset due to the
1859 * power well being turned off. Make sure we restore
1860 * them to a consistent state even if no primary/sprite
1861 * planes are initially active.
1862 */
1863 if (needs_modeset)
1864 crtc_state->fifo_changed = true;
1865
1866 if (!dirty)
1867 return 0;
1868
1869 /* cursor changes don't warrant a FIFO recompute */
1870 if (dirty & ~BIT(PLANE_CURSOR)) {
1871 const struct intel_crtc_state *old_crtc_state =
1872 intel_atomic_get_old_crtc_state(state, crtc);
1873 const struct vlv_fifo_state *old_fifo_state =
1874 &old_crtc_state->wm.vlv.fifo_state;
1875
1876 ret = vlv_compute_fifo(crtc_state);
1877 if (ret)
1878 return ret;
1879
1880 if (needs_modeset ||
1881 memcmp(old_fifo_state, fifo_state,
1882 sizeof(*fifo_state)) != 0)
1883 crtc_state->fifo_changed = true;
1884 }
1885
1886 /* initially allow all levels */
1887 wm_state->num_levels = intel_wm_num_levels(dev_priv);
1888 /*
1889 * Note that enabling cxsr with no primary/sprite planes
1890 * enabled can wedge the pipe. Hence we only allow cxsr
1891 * with exactly one enabled primary/sprite plane.
1892 */
1893 wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1894
1895 for (level = 0; level < wm_state->num_levels; level++) {
1896 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1897 const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
1898
1899 if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1900 break;
1901
1902 for_each_plane_id_on_crtc(crtc, plane_id) {
1903 wm_state->wm[level].plane[plane_id] =
1904 vlv_invert_wm_value(raw->plane[plane_id],
1905 fifo_state->plane[plane_id]);
1906 }
1907
1908 wm_state->sr[level].plane =
1909 vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1910 raw->plane[PLANE_SPRITE0],
1911 raw->plane[PLANE_SPRITE1]),
1912 sr_fifo_size);
1913
1914 wm_state->sr[level].cursor =
1915 vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1916 63);
1917 }
1918
1919 if (level == 0)
1920 return -EINVAL;
1921
1922 /* limit to only levels we can actually handle */
1923 wm_state->num_levels = level;
1924
1925 /* invalidate the higher levels */
1926 vlv_invalidate_wms(crtc, wm_state, level);
1927
1928 return 0;
1929 }
1930
1931 #define VLV_FIFO(plane, value) \
1932 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1933
1934 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1935 struct intel_crtc_state *crtc_state)
1936 {
1937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1938 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1939 const struct vlv_fifo_state *fifo_state =
1940 &crtc_state->wm.vlv.fifo_state;
1941 int sprite0_start, sprite1_start, fifo_size;
1942
1943 if (!crtc_state->fifo_changed)
1944 return;
1945
1946 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1947 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1948 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1949
1950 WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
1951 WARN_ON(fifo_size != 511);
1952
1953 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1954
1955 /*
1956 * uncore.lock serves a double purpose here. It allows us to
1957 * use the less expensive I915_{READ,WRITE}_FW() functions, and
1958 * it protects the DSPARB registers from getting clobbered by
1959 * parallel updates from multiple pipes.
1960 *
1961 * intel_pipe_update_start() has already disabled interrupts
1962 * for us, so a plain spin_lock() is sufficient here.
1963 */
1964 spin_lock(&dev_priv->uncore.lock);
1965
1966 switch (crtc->pipe) {
1967 uint32_t dsparb, dsparb2, dsparb3;
1968 case PIPE_A:
1969 dsparb = I915_READ_FW(DSPARB);
1970 dsparb2 = I915_READ_FW(DSPARB2);
1971
1972 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1973 VLV_FIFO(SPRITEB, 0xff));
1974 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1975 VLV_FIFO(SPRITEB, sprite1_start));
1976
1977 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1978 VLV_FIFO(SPRITEB_HI, 0x1));
1979 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1980 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1981
1982 I915_WRITE_FW(DSPARB, dsparb);
1983 I915_WRITE_FW(DSPARB2, dsparb2);
1984 break;
1985 case PIPE_B:
1986 dsparb = I915_READ_FW(DSPARB);
1987 dsparb2 = I915_READ_FW(DSPARB2);
1988
1989 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
1990 VLV_FIFO(SPRITED, 0xff));
1991 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
1992 VLV_FIFO(SPRITED, sprite1_start));
1993
1994 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
1995 VLV_FIFO(SPRITED_HI, 0xff));
1996 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
1997 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
1998
1999 I915_WRITE_FW(DSPARB, dsparb);
2000 I915_WRITE_FW(DSPARB2, dsparb2);
2001 break;
2002 case PIPE_C:
2003 dsparb3 = I915_READ_FW(DSPARB3);
2004 dsparb2 = I915_READ_FW(DSPARB2);
2005
2006 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2007 VLV_FIFO(SPRITEF, 0xff));
2008 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2009 VLV_FIFO(SPRITEF, sprite1_start));
2010
2011 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2012 VLV_FIFO(SPRITEF_HI, 0xff));
2013 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2014 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2015
2016 I915_WRITE_FW(DSPARB3, dsparb3);
2017 I915_WRITE_FW(DSPARB2, dsparb2);
2018 break;
2019 default:
2020 break;
2021 }
2022
2023 POSTING_READ_FW(DSPARB);
2024
2025 spin_unlock(&dev_priv->uncore.lock);
2026 }
2027
2028 #undef VLV_FIFO
2029
2030 static int vlv_compute_intermediate_wm(struct drm_device *dev,
2031 struct intel_crtc *crtc,
2032 struct intel_crtc_state *crtc_state)
2033 {
2034 struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
2035 const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
2036 const struct vlv_wm_state *active = &crtc->wm.active.vlv;
2037 int level;
2038
2039 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2040 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2041 !crtc_state->disable_cxsr;
2042
2043 for (level = 0; level < intermediate->num_levels; level++) {
2044 enum plane_id plane_id;
2045
2046 for_each_plane_id_on_crtc(crtc, plane_id) {
2047 intermediate->wm[level].plane[plane_id] =
2048 min(optimal->wm[level].plane[plane_id],
2049 active->wm[level].plane[plane_id]);
2050 }
2051
2052 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2053 active->sr[level].plane);
2054 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2055 active->sr[level].cursor);
2056 }
2057
2058 vlv_invalidate_wms(crtc, intermediate, level);
2059
2060 /*
2061 * If our intermediate WM are identical to the final WM, then we can
2062 * omit the post-vblank programming; only update if it's different.
2063 */
2064 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2065 crtc_state->wm.need_postvbl_update = true;
2066
2067 return 0;
2068 }
2069
2070 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2071 struct vlv_wm_values *wm)
2072 {
2073 struct intel_crtc *crtc;
2074 int num_active_crtcs = 0;
2075
2076 wm->level = dev_priv->wm.max_level;
2077 wm->cxsr = true;
2078
2079 for_each_intel_crtc(&dev_priv->drm, crtc) {
2080 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2081
2082 if (!crtc->active)
2083 continue;
2084
2085 if (!wm_state->cxsr)
2086 wm->cxsr = false;
2087
2088 num_active_crtcs++;
2089 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2090 }
2091
2092 if (num_active_crtcs != 1)
2093 wm->cxsr = false;
2094
2095 if (num_active_crtcs > 1)
2096 wm->level = VLV_WM_LEVEL_PM2;
2097
2098 for_each_intel_crtc(&dev_priv->drm, crtc) {
2099 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2100 enum pipe pipe = crtc->pipe;
2101
2102 wm->pipe[pipe] = wm_state->wm[wm->level];
2103 if (crtc->active && wm->cxsr)
2104 wm->sr = wm_state->sr[wm->level];
2105
2106 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2107 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2108 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2109 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2110 }
2111 }
2112
2113 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2114 {
2115 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2116 struct vlv_wm_values new_wm = {};
2117
2118 vlv_merge_wm(dev_priv, &new_wm);
2119
2120 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2121 return;
2122
2123 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2124 chv_set_memory_dvfs(dev_priv, false);
2125
2126 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2127 chv_set_memory_pm5(dev_priv, false);
2128
2129 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2130 _intel_set_memory_cxsr(dev_priv, false);
2131
2132 vlv_write_wm_values(dev_priv, &new_wm);
2133
2134 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2135 _intel_set_memory_cxsr(dev_priv, true);
2136
2137 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2138 chv_set_memory_pm5(dev_priv, true);
2139
2140 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2141 chv_set_memory_dvfs(dev_priv, true);
2142
2143 *old_wm = new_wm;
2144 }
2145
2146 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2147 struct intel_crtc_state *crtc_state)
2148 {
2149 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2151
2152 mutex_lock(&dev_priv->wm.wm_mutex);
2153 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2154 vlv_program_watermarks(dev_priv);
2155 mutex_unlock(&dev_priv->wm.wm_mutex);
2156 }
2157
2158 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2159 struct intel_crtc_state *crtc_state)
2160 {
2161 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2162 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2163
2164 if (!crtc_state->wm.need_postvbl_update)
2165 return;
2166
2167 mutex_lock(&dev_priv->wm.wm_mutex);
2168 intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2169 vlv_program_watermarks(dev_priv);
2170 mutex_unlock(&dev_priv->wm.wm_mutex);
2171 }
2172
2173 static void i965_update_wm(struct intel_crtc *unused_crtc)
2174 {
2175 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2176 struct intel_crtc *crtc;
2177 int srwm = 1;
2178 int cursor_sr = 16;
2179 bool cxsr_enabled;
2180
2181 /* Calc sr entries for one plane configs */
2182 crtc = single_enabled_crtc(dev_priv);
2183 if (crtc) {
2184 /* self-refresh has much higher latency */
2185 static const int sr_latency_ns = 12000;
2186 const struct drm_display_mode *adjusted_mode =
2187 &crtc->config->base.adjusted_mode;
2188 const struct drm_framebuffer *fb =
2189 crtc->base.primary->state->fb;
2190 int clock = adjusted_mode->crtc_clock;
2191 int htotal = adjusted_mode->crtc_htotal;
2192 int hdisplay = crtc->config->pipe_src_w;
2193 int cpp = fb->format->cpp[0];
2194 int entries;
2195
2196 entries = intel_wm_method2(clock, htotal,
2197 hdisplay, cpp, sr_latency_ns / 100);
2198 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2199 srwm = I965_FIFO_SIZE - entries;
2200 if (srwm < 0)
2201 srwm = 1;
2202 srwm &= 0x1ff;
2203 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
2204 entries, srwm);
2205
2206 entries = intel_wm_method2(clock, htotal,
2207 crtc->base.cursor->state->crtc_w, 4,
2208 sr_latency_ns / 100);
2209 entries = DIV_ROUND_UP(entries,
2210 i965_cursor_wm_info.cacheline_size) +
2211 i965_cursor_wm_info.guard_size;
2212
2213 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2214 if (cursor_sr > i965_cursor_wm_info.max_wm)
2215 cursor_sr = i965_cursor_wm_info.max_wm;
2216
2217 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
2218 "cursor %d\n", srwm, cursor_sr);
2219
2220 cxsr_enabled = true;
2221 } else {
2222 cxsr_enabled = false;
2223 /* Turn off self refresh if both pipes are enabled */
2224 intel_set_memory_cxsr(dev_priv, false);
2225 }
2226
2227 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2228 srwm);
2229
2230 /* 965 has limitations... */
2231 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2232 FW_WM(8, CURSORB) |
2233 FW_WM(8, PLANEB) |
2234 FW_WM(8, PLANEA));
2235 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2236 FW_WM(8, PLANEC_OLD));
2237 /* update cursor SR watermark */
2238 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2239
2240 if (cxsr_enabled)
2241 intel_set_memory_cxsr(dev_priv, true);
2242 }
2243
2244 #undef FW_WM
2245
2246 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2247 {
2248 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2249 const struct intel_watermark_params *wm_info;
2250 uint32_t fwater_lo;
2251 uint32_t fwater_hi;
2252 int cwm, srwm = 1;
2253 int fifo_size;
2254 int planea_wm, planeb_wm;
2255 struct intel_crtc *crtc, *enabled = NULL;
2256
2257 if (IS_I945GM(dev_priv))
2258 wm_info = &i945_wm_info;
2259 else if (!IS_GEN2(dev_priv))
2260 wm_info = &i915_wm_info;
2261 else
2262 wm_info = &i830_a_wm_info;
2263
2264 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
2265 crtc = intel_get_crtc_for_plane(dev_priv, 0);
2266 if (intel_crtc_active(crtc)) {
2267 const struct drm_display_mode *adjusted_mode =
2268 &crtc->config->base.adjusted_mode;
2269 const struct drm_framebuffer *fb =
2270 crtc->base.primary->state->fb;
2271 int cpp;
2272
2273 if (IS_GEN2(dev_priv))
2274 cpp = 4;
2275 else
2276 cpp = fb->format->cpp[0];
2277
2278 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2279 wm_info, fifo_size, cpp,
2280 pessimal_latency_ns);
2281 enabled = crtc;
2282 } else {
2283 planea_wm = fifo_size - wm_info->guard_size;
2284 if (planea_wm > (long)wm_info->max_wm)
2285 planea_wm = wm_info->max_wm;
2286 }
2287
2288 if (IS_GEN2(dev_priv))
2289 wm_info = &i830_bc_wm_info;
2290
2291 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
2292 crtc = intel_get_crtc_for_plane(dev_priv, 1);
2293 if (intel_crtc_active(crtc)) {
2294 const struct drm_display_mode *adjusted_mode =
2295 &crtc->config->base.adjusted_mode;
2296 const struct drm_framebuffer *fb =
2297 crtc->base.primary->state->fb;
2298 int cpp;
2299
2300 if (IS_GEN2(dev_priv))
2301 cpp = 4;
2302 else
2303 cpp = fb->format->cpp[0];
2304
2305 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2306 wm_info, fifo_size, cpp,
2307 pessimal_latency_ns);
2308 if (enabled == NULL)
2309 enabled = crtc;
2310 else
2311 enabled = NULL;
2312 } else {
2313 planeb_wm = fifo_size - wm_info->guard_size;
2314 if (planeb_wm > (long)wm_info->max_wm)
2315 planeb_wm = wm_info->max_wm;
2316 }
2317
2318 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2319
2320 if (IS_I915GM(dev_priv) && enabled) {
2321 struct drm_i915_gem_object *obj;
2322
2323 obj = intel_fb_obj(enabled->base.primary->state->fb);
2324
2325 /* self-refresh seems busted with untiled */
2326 if (!i915_gem_object_is_tiled(obj))
2327 enabled = NULL;
2328 }
2329
2330 /*
2331 * Overlay gets an aggressive default since video jitter is bad.
2332 */
2333 cwm = 2;
2334
2335 /* Play safe and disable self-refresh before adjusting watermarks. */
2336 intel_set_memory_cxsr(dev_priv, false);
2337
2338 /* Calc sr entries for one plane configs */
2339 if (HAS_FW_BLC(dev_priv) && enabled) {
2340 /* self-refresh has much higher latency */
2341 static const int sr_latency_ns = 6000;
2342 const struct drm_display_mode *adjusted_mode =
2343 &enabled->config->base.adjusted_mode;
2344 const struct drm_framebuffer *fb =
2345 enabled->base.primary->state->fb;
2346 int clock = adjusted_mode->crtc_clock;
2347 int htotal = adjusted_mode->crtc_htotal;
2348 int hdisplay = enabled->config->pipe_src_w;
2349 int cpp;
2350 int entries;
2351
2352 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2353 cpp = 4;
2354 else
2355 cpp = fb->format->cpp[0];
2356
2357 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2358 sr_latency_ns / 100);
2359 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2360 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
2361 srwm = wm_info->fifo_size - entries;
2362 if (srwm < 0)
2363 srwm = 1;
2364
2365 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2366 I915_WRITE(FW_BLC_SELF,
2367 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2368 else
2369 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2370 }
2371
2372 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2373 planea_wm, planeb_wm, cwm, srwm);
2374
2375 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2376 fwater_hi = (cwm & 0x1f);
2377
2378 /* Set request length to 8 cachelines per fetch */
2379 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2380 fwater_hi = fwater_hi | (1 << 8);
2381
2382 I915_WRITE(FW_BLC, fwater_lo);
2383 I915_WRITE(FW_BLC2, fwater_hi);
2384
2385 if (enabled)
2386 intel_set_memory_cxsr(dev_priv, true);
2387 }
2388
2389 static void i845_update_wm(struct intel_crtc *unused_crtc)
2390 {
2391 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2392 struct intel_crtc *crtc;
2393 const struct drm_display_mode *adjusted_mode;
2394 uint32_t fwater_lo;
2395 int planea_wm;
2396
2397 crtc = single_enabled_crtc(dev_priv);
2398 if (crtc == NULL)
2399 return;
2400
2401 adjusted_mode = &crtc->config->base.adjusted_mode;
2402 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2403 &i845_wm_info,
2404 dev_priv->display.get_fifo_size(dev_priv, 0),
2405 4, pessimal_latency_ns);
2406 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2407 fwater_lo |= (3<<8) | planea_wm;
2408
2409 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2410
2411 I915_WRITE(FW_BLC, fwater_lo);
2412 }
2413
2414 /* latency must be in 0.1us units. */
2415 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2416 unsigned int cpp,
2417 unsigned int latency)
2418 {
2419 unsigned int ret;
2420
2421 ret = intel_wm_method1(pixel_rate, cpp, latency);
2422 ret = DIV_ROUND_UP(ret, 64) + 2;
2423
2424 return ret;
2425 }
2426
2427 /* latency must be in 0.1us units. */
2428 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2429 unsigned int htotal,
2430 unsigned int width,
2431 unsigned int cpp,
2432 unsigned int latency)
2433 {
2434 unsigned int ret;
2435
2436 ret = intel_wm_method2(pixel_rate, htotal,
2437 width, cpp, latency);
2438 ret = DIV_ROUND_UP(ret, 64) + 2;
2439
2440 return ret;
2441 }
2442
2443 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2444 uint8_t cpp)
2445 {
2446 /*
2447 * Neither of these should be possible since this function shouldn't be
2448 * called if the CRTC is off or the plane is invisible. But let's be
2449 * extra paranoid to avoid a potential divide-by-zero if we screw up
2450 * elsewhere in the driver.
2451 */
2452 if (WARN_ON(!cpp))
2453 return 0;
2454 if (WARN_ON(!horiz_pixels))
2455 return 0;
2456
2457 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2458 }
2459
2460 struct ilk_wm_maximums {
2461 uint16_t pri;
2462 uint16_t spr;
2463 uint16_t cur;
2464 uint16_t fbc;
2465 };
2466
2467 /*
2468 * For both WM_PIPE and WM_LP.
2469 * mem_value must be in 0.1us units.
2470 */
2471 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2472 const struct intel_plane_state *pstate,
2473 uint32_t mem_value,
2474 bool is_lp)
2475 {
2476 uint32_t method1, method2;
2477 int cpp;
2478
2479 if (!intel_wm_plane_visible(cstate, pstate))
2480 return 0;
2481
2482 cpp = pstate->base.fb->format->cpp[0];
2483
2484 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2485
2486 if (!is_lp)
2487 return method1;
2488
2489 method2 = ilk_wm_method2(cstate->pixel_rate,
2490 cstate->base.adjusted_mode.crtc_htotal,
2491 drm_rect_width(&pstate->base.dst),
2492 cpp, mem_value);
2493
2494 return min(method1, method2);
2495 }
2496
2497 /*
2498 * For both WM_PIPE and WM_LP.
2499 * mem_value must be in 0.1us units.
2500 */
2501 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2502 const struct intel_plane_state *pstate,
2503 uint32_t mem_value)
2504 {
2505 uint32_t method1, method2;
2506 int cpp;
2507
2508 if (!intel_wm_plane_visible(cstate, pstate))
2509 return 0;
2510
2511 cpp = pstate->base.fb->format->cpp[0];
2512
2513 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2514 method2 = ilk_wm_method2(cstate->pixel_rate,
2515 cstate->base.adjusted_mode.crtc_htotal,
2516 drm_rect_width(&pstate->base.dst),
2517 cpp, mem_value);
2518 return min(method1, method2);
2519 }
2520
2521 /*
2522 * For both WM_PIPE and WM_LP.
2523 * mem_value must be in 0.1us units.
2524 */
2525 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2526 const struct intel_plane_state *pstate,
2527 uint32_t mem_value)
2528 {
2529 int cpp;
2530
2531 if (!intel_wm_plane_visible(cstate, pstate))
2532 return 0;
2533
2534 cpp = pstate->base.fb->format->cpp[0];
2535
2536 return ilk_wm_method2(cstate->pixel_rate,
2537 cstate->base.adjusted_mode.crtc_htotal,
2538 pstate->base.crtc_w, cpp, mem_value);
2539 }
2540
2541 /* Only for WM_LP. */
2542 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2543 const struct intel_plane_state *pstate,
2544 uint32_t pri_val)
2545 {
2546 int cpp;
2547
2548 if (!intel_wm_plane_visible(cstate, pstate))
2549 return 0;
2550
2551 cpp = pstate->base.fb->format->cpp[0];
2552
2553 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
2554 }
2555
2556 static unsigned int
2557 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2558 {
2559 if (INTEL_GEN(dev_priv) >= 8)
2560 return 3072;
2561 else if (INTEL_GEN(dev_priv) >= 7)
2562 return 768;
2563 else
2564 return 512;
2565 }
2566
2567 static unsigned int
2568 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2569 int level, bool is_sprite)
2570 {
2571 if (INTEL_GEN(dev_priv) >= 8)
2572 /* BDW primary/sprite plane watermarks */
2573 return level == 0 ? 255 : 2047;
2574 else if (INTEL_GEN(dev_priv) >= 7)
2575 /* IVB/HSW primary/sprite plane watermarks */
2576 return level == 0 ? 127 : 1023;
2577 else if (!is_sprite)
2578 /* ILK/SNB primary plane watermarks */
2579 return level == 0 ? 127 : 511;
2580 else
2581 /* ILK/SNB sprite plane watermarks */
2582 return level == 0 ? 63 : 255;
2583 }
2584
2585 static unsigned int
2586 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2587 {
2588 if (INTEL_GEN(dev_priv) >= 7)
2589 return level == 0 ? 63 : 255;
2590 else
2591 return level == 0 ? 31 : 63;
2592 }
2593
2594 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2595 {
2596 if (INTEL_GEN(dev_priv) >= 8)
2597 return 31;
2598 else
2599 return 15;
2600 }
2601
2602 /* Calculate the maximum primary/sprite plane watermark */
2603 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2604 int level,
2605 const struct intel_wm_config *config,
2606 enum intel_ddb_partitioning ddb_partitioning,
2607 bool is_sprite)
2608 {
2609 struct drm_i915_private *dev_priv = to_i915(dev);
2610 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2611
2612 /* if sprites aren't enabled, sprites get nothing */
2613 if (is_sprite && !config->sprites_enabled)
2614 return 0;
2615
2616 /* HSW allows LP1+ watermarks even with multiple pipes */
2617 if (level == 0 || config->num_pipes_active > 1) {
2618 fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
2619
2620 /*
2621 * For some reason the non self refresh
2622 * FIFO size is only half of the self
2623 * refresh FIFO size on ILK/SNB.
2624 */
2625 if (INTEL_GEN(dev_priv) <= 6)
2626 fifo_size /= 2;
2627 }
2628
2629 if (config->sprites_enabled) {
2630 /* level 0 is always calculated with 1:1 split */
2631 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2632 if (is_sprite)
2633 fifo_size *= 5;
2634 fifo_size /= 6;
2635 } else {
2636 fifo_size /= 2;
2637 }
2638 }
2639
2640 /* clamp to max that the registers can hold */
2641 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2642 }
2643
2644 /* Calculate the maximum cursor plane watermark */
2645 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2646 int level,
2647 const struct intel_wm_config *config)
2648 {
2649 /* HSW LP1+ watermarks w/ multiple pipes */
2650 if (level > 0 && config->num_pipes_active > 1)
2651 return 64;
2652
2653 /* otherwise just report max that registers can hold */
2654 return ilk_cursor_wm_reg_max(to_i915(dev), level);
2655 }
2656
2657 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2658 int level,
2659 const struct intel_wm_config *config,
2660 enum intel_ddb_partitioning ddb_partitioning,
2661 struct ilk_wm_maximums *max)
2662 {
2663 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2664 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2665 max->cur = ilk_cursor_wm_max(dev, level, config);
2666 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
2667 }
2668
2669 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2670 int level,
2671 struct ilk_wm_maximums *max)
2672 {
2673 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2674 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2675 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2676 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2677 }
2678
2679 static bool ilk_validate_wm_level(int level,
2680 const struct ilk_wm_maximums *max,
2681 struct intel_wm_level *result)
2682 {
2683 bool ret;
2684
2685 /* already determined to be invalid? */
2686 if (!result->enable)
2687 return false;
2688
2689 result->enable = result->pri_val <= max->pri &&
2690 result->spr_val <= max->spr &&
2691 result->cur_val <= max->cur;
2692
2693 ret = result->enable;
2694
2695 /*
2696 * HACK until we can pre-compute everything,
2697 * and thus fail gracefully if LP0 watermarks
2698 * are exceeded...
2699 */
2700 if (level == 0 && !result->enable) {
2701 if (result->pri_val > max->pri)
2702 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2703 level, result->pri_val, max->pri);
2704 if (result->spr_val > max->spr)
2705 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2706 level, result->spr_val, max->spr);
2707 if (result->cur_val > max->cur)
2708 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2709 level, result->cur_val, max->cur);
2710
2711 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2712 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2713 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2714 result->enable = true;
2715 }
2716
2717 return ret;
2718 }
2719
2720 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2721 const struct intel_crtc *intel_crtc,
2722 int level,
2723 struct intel_crtc_state *cstate,
2724 struct intel_plane_state *pristate,
2725 struct intel_plane_state *sprstate,
2726 struct intel_plane_state *curstate,
2727 struct intel_wm_level *result)
2728 {
2729 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2730 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2731 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2732
2733 /* WM1+ latency values stored in 0.5us units */
2734 if (level > 0) {
2735 pri_latency *= 5;
2736 spr_latency *= 5;
2737 cur_latency *= 5;
2738 }
2739
2740 if (pristate) {
2741 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2742 pri_latency, level);
2743 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2744 }
2745
2746 if (sprstate)
2747 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2748
2749 if (curstate)
2750 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2751
2752 result->enable = true;
2753 }
2754
2755 static uint32_t
2756 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2757 {
2758 const struct intel_atomic_state *intel_state =
2759 to_intel_atomic_state(cstate->base.state);
2760 const struct drm_display_mode *adjusted_mode =
2761 &cstate->base.adjusted_mode;
2762 u32 linetime, ips_linetime;
2763
2764 if (!cstate->base.active)
2765 return 0;
2766 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2767 return 0;
2768 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
2769 return 0;
2770
2771 /* The WM are computed with base on how long it takes to fill a single
2772 * row at the given clock rate, multiplied by 8.
2773 * */
2774 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2775 adjusted_mode->crtc_clock);
2776 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2777 intel_state->cdclk.logical.cdclk);
2778
2779 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2780 PIPE_WM_LINETIME_TIME(linetime);
2781 }
2782
2783 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2784 uint16_t wm[8])
2785 {
2786 if (INTEL_GEN(dev_priv) >= 9) {
2787 uint32_t val;
2788 int ret, i;
2789 int level, max_level = ilk_wm_max_level(dev_priv);
2790
2791 /* read the first set of memory latencies[0:3] */
2792 val = 0; /* data0 to be programmed to 0 for first set */
2793 mutex_lock(&dev_priv->rps.hw_lock);
2794 ret = sandybridge_pcode_read(dev_priv,
2795 GEN9_PCODE_READ_MEM_LATENCY,
2796 &val);
2797 mutex_unlock(&dev_priv->rps.hw_lock);
2798
2799 if (ret) {
2800 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2801 return;
2802 }
2803
2804 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2805 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2806 GEN9_MEM_LATENCY_LEVEL_MASK;
2807 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2808 GEN9_MEM_LATENCY_LEVEL_MASK;
2809 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2810 GEN9_MEM_LATENCY_LEVEL_MASK;
2811
2812 /* read the second set of memory latencies[4:7] */
2813 val = 1; /* data0 to be programmed to 1 for second set */
2814 mutex_lock(&dev_priv->rps.hw_lock);
2815 ret = sandybridge_pcode_read(dev_priv,
2816 GEN9_PCODE_READ_MEM_LATENCY,
2817 &val);
2818 mutex_unlock(&dev_priv->rps.hw_lock);
2819 if (ret) {
2820 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2821 return;
2822 }
2823
2824 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2825 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2826 GEN9_MEM_LATENCY_LEVEL_MASK;
2827 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2828 GEN9_MEM_LATENCY_LEVEL_MASK;
2829 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2830 GEN9_MEM_LATENCY_LEVEL_MASK;
2831
2832 /*
2833 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2834 * need to be disabled. We make sure to sanitize the values out
2835 * of the punit to satisfy this requirement.
2836 */
2837 for (level = 1; level <= max_level; level++) {
2838 if (wm[level] == 0) {
2839 for (i = level + 1; i <= max_level; i++)
2840 wm[i] = 0;
2841 break;
2842 }
2843 }
2844
2845 /*
2846 * WaWmMemoryReadLatency:skl+,glk
2847 *
2848 * punit doesn't take into account the read latency so we need
2849 * to add 2us to the various latency levels we retrieve from the
2850 * punit when level 0 response data us 0us.
2851 */
2852 if (wm[0] == 0) {
2853 wm[0] += 2;
2854 for (level = 1; level <= max_level; level++) {
2855 if (wm[level] == 0)
2856 break;
2857 wm[level] += 2;
2858 }
2859 }
2860
2861 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2862 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2863
2864 wm[0] = (sskpd >> 56) & 0xFF;
2865 if (wm[0] == 0)
2866 wm[0] = sskpd & 0xF;
2867 wm[1] = (sskpd >> 4) & 0xFF;
2868 wm[2] = (sskpd >> 12) & 0xFF;
2869 wm[3] = (sskpd >> 20) & 0x1FF;
2870 wm[4] = (sskpd >> 32) & 0x1FF;
2871 } else if (INTEL_GEN(dev_priv) >= 6) {
2872 uint32_t sskpd = I915_READ(MCH_SSKPD);
2873
2874 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2875 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2876 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2877 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2878 } else if (INTEL_GEN(dev_priv) >= 5) {
2879 uint32_t mltr = I915_READ(MLTR_ILK);
2880
2881 /* ILK primary LP0 latency is 700 ns */
2882 wm[0] = 7;
2883 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2884 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2885 } else {
2886 MISSING_CASE(INTEL_DEVID(dev_priv));
2887 }
2888 }
2889
2890 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2891 uint16_t wm[5])
2892 {
2893 /* ILK sprite LP0 latency is 1300 ns */
2894 if (IS_GEN5(dev_priv))
2895 wm[0] = 13;
2896 }
2897
2898 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2899 uint16_t wm[5])
2900 {
2901 /* ILK cursor LP0 latency is 1300 ns */
2902 if (IS_GEN5(dev_priv))
2903 wm[0] = 13;
2904
2905 /* WaDoubleCursorLP3Latency:ivb */
2906 if (IS_IVYBRIDGE(dev_priv))
2907 wm[3] *= 2;
2908 }
2909
2910 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2911 {
2912 /* how many WM levels are we expecting */
2913 if (INTEL_GEN(dev_priv) >= 9)
2914 return 7;
2915 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2916 return 4;
2917 else if (INTEL_GEN(dev_priv) >= 6)
2918 return 3;
2919 else
2920 return 2;
2921 }
2922
2923 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2924 const char *name,
2925 const uint16_t wm[8])
2926 {
2927 int level, max_level = ilk_wm_max_level(dev_priv);
2928
2929 for (level = 0; level <= max_level; level++) {
2930 unsigned int latency = wm[level];
2931
2932 if (latency == 0) {
2933 DRM_ERROR("%s WM%d latency not provided\n",
2934 name, level);
2935 continue;
2936 }
2937
2938 /*
2939 * - latencies are in us on gen9.
2940 * - before then, WM1+ latency values are in 0.5us units
2941 */
2942 if (INTEL_GEN(dev_priv) >= 9)
2943 latency *= 10;
2944 else if (level > 0)
2945 latency *= 5;
2946
2947 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2948 name, level, wm[level],
2949 latency / 10, latency % 10);
2950 }
2951 }
2952
2953 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2954 uint16_t wm[5], uint16_t min)
2955 {
2956 int level, max_level = ilk_wm_max_level(dev_priv);
2957
2958 if (wm[0] >= min)
2959 return false;
2960
2961 wm[0] = max(wm[0], min);
2962 for (level = 1; level <= max_level; level++)
2963 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2964
2965 return true;
2966 }
2967
2968 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2969 {
2970 bool changed;
2971
2972 /*
2973 * The BIOS provided WM memory latency values are often
2974 * inadequate for high resolution displays. Adjust them.
2975 */
2976 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2977 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2978 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2979
2980 if (!changed)
2981 return;
2982
2983 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2984 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2985 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2986 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2987 }
2988
2989 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
2990 {
2991 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
2992
2993 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2994 sizeof(dev_priv->wm.pri_latency));
2995 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2996 sizeof(dev_priv->wm.pri_latency));
2997
2998 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
2999 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3000
3001 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3002 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3003 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3004
3005 if (IS_GEN6(dev_priv))
3006 snb_wm_latency_quirk(dev_priv);
3007 }
3008
3009 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3010 {
3011 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3012 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3013 }
3014
3015 static bool ilk_validate_pipe_wm(struct drm_device *dev,
3016 struct intel_pipe_wm *pipe_wm)
3017 {
3018 /* LP0 watermark maximums depend on this pipe alone */
3019 const struct intel_wm_config config = {
3020 .num_pipes_active = 1,
3021 .sprites_enabled = pipe_wm->sprites_enabled,
3022 .sprites_scaled = pipe_wm->sprites_scaled,
3023 };
3024 struct ilk_wm_maximums max;
3025
3026 /* LP0 watermarks always use 1/2 DDB partitioning */
3027 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
3028
3029 /* At least LP0 must be valid */
3030 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3031 DRM_DEBUG_KMS("LP0 watermark invalid\n");
3032 return false;
3033 }
3034
3035 return true;
3036 }
3037
3038 /* Compute new watermarks for the pipe */
3039 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3040 {
3041 struct drm_atomic_state *state = cstate->base.state;
3042 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3043 struct intel_pipe_wm *pipe_wm;
3044 struct drm_device *dev = state->dev;
3045 const struct drm_i915_private *dev_priv = to_i915(dev);
3046 struct intel_plane *intel_plane;
3047 struct intel_plane_state *pristate = NULL;
3048 struct intel_plane_state *sprstate = NULL;
3049 struct intel_plane_state *curstate = NULL;
3050 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3051 struct ilk_wm_maximums max;
3052
3053 pipe_wm = &cstate->wm.ilk.optimal;
3054
3055 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3056 struct intel_plane_state *ps;
3057
3058 ps = intel_atomic_get_existing_plane_state(state,
3059 intel_plane);
3060 if (!ps)
3061 continue;
3062
3063 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3064 pristate = ps;
3065 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3066 sprstate = ps;
3067 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
3068 curstate = ps;
3069 }
3070
3071 pipe_wm->pipe_enabled = cstate->base.active;
3072 if (sprstate) {
3073 pipe_wm->sprites_enabled = sprstate->base.visible;
3074 pipe_wm->sprites_scaled = sprstate->base.visible &&
3075 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
3076 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
3077 }
3078
3079 usable_level = max_level;
3080
3081 /* ILK/SNB: LP2+ watermarks only w/o sprites */
3082 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3083 usable_level = 1;
3084
3085 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3086 if (pipe_wm->sprites_scaled)
3087 usable_level = 0;
3088
3089 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
3090 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
3091
3092 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3093 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
3094
3095 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3096 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
3097
3098 if (!ilk_validate_pipe_wm(dev, pipe_wm))
3099 return -EINVAL;
3100
3101 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3102
3103 for (level = 1; level <= max_level; level++) {
3104 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
3105
3106 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
3107 pristate, sprstate, curstate, wm);
3108
3109 /*
3110 * Disable any watermark level that exceeds the
3111 * register maximums since such watermarks are
3112 * always invalid.
3113 */
3114 if (level > usable_level)
3115 continue;
3116
3117 if (ilk_validate_wm_level(level, &max, wm))
3118 pipe_wm->wm[level] = *wm;
3119 else
3120 usable_level = level;
3121 }
3122
3123 return 0;
3124 }
3125
3126 /*
3127 * Build a set of 'intermediate' watermark values that satisfy both the old
3128 * state and the new state. These can be programmed to the hardware
3129 * immediately.
3130 */
3131 static int ilk_compute_intermediate_wm(struct drm_device *dev,
3132 struct intel_crtc *intel_crtc,
3133 struct intel_crtc_state *newstate)
3134 {
3135 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3136 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
3137 int level, max_level = ilk_wm_max_level(to_i915(dev));
3138
3139 /*
3140 * Start with the final, target watermarks, then combine with the
3141 * currently active watermarks to get values that are safe both before
3142 * and after the vblank.
3143 */
3144 *a = newstate->wm.ilk.optimal;
3145 a->pipe_enabled |= b->pipe_enabled;
3146 a->sprites_enabled |= b->sprites_enabled;
3147 a->sprites_scaled |= b->sprites_scaled;
3148
3149 for (level = 0; level <= max_level; level++) {
3150 struct intel_wm_level *a_wm = &a->wm[level];
3151 const struct intel_wm_level *b_wm = &b->wm[level];
3152
3153 a_wm->enable &= b_wm->enable;
3154 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3155 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3156 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3157 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3158 }
3159
3160 /*
3161 * We need to make sure that these merged watermark values are
3162 * actually a valid configuration themselves. If they're not,
3163 * there's no safe way to transition from the old state to
3164 * the new state, so we need to fail the atomic transaction.
3165 */
3166 if (!ilk_validate_pipe_wm(dev, a))
3167 return -EINVAL;
3168
3169 /*
3170 * If our intermediate WM are identical to the final WM, then we can
3171 * omit the post-vblank programming; only update if it's different.
3172 */
3173 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3174 newstate->wm.need_postvbl_update = true;
3175
3176 return 0;
3177 }
3178
3179 /*
3180 * Merge the watermarks from all active pipes for a specific level.
3181 */
3182 static void ilk_merge_wm_level(struct drm_device *dev,
3183 int level,
3184 struct intel_wm_level *ret_wm)
3185 {
3186 const struct intel_crtc *intel_crtc;
3187
3188 ret_wm->enable = true;
3189
3190 for_each_intel_crtc(dev, intel_crtc) {
3191 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3192 const struct intel_wm_level *wm = &active->wm[level];
3193
3194 if (!active->pipe_enabled)
3195 continue;
3196
3197 /*
3198 * The watermark values may have been used in the past,
3199 * so we must maintain them in the registers for some
3200 * time even if the level is now disabled.
3201 */
3202 if (!wm->enable)
3203 ret_wm->enable = false;
3204
3205 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3206 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3207 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3208 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3209 }
3210 }
3211
3212 /*
3213 * Merge all low power watermarks for all active pipes.
3214 */
3215 static void ilk_wm_merge(struct drm_device *dev,
3216 const struct intel_wm_config *config,
3217 const struct ilk_wm_maximums *max,
3218 struct intel_pipe_wm *merged)
3219 {
3220 struct drm_i915_private *dev_priv = to_i915(dev);
3221 int level, max_level = ilk_wm_max_level(dev_priv);
3222 int last_enabled_level = max_level;
3223
3224 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3225 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3226 config->num_pipes_active > 1)
3227 last_enabled_level = 0;
3228
3229 /* ILK: FBC WM must be disabled always */
3230 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3231
3232 /* merge each WM1+ level */
3233 for (level = 1; level <= max_level; level++) {
3234 struct intel_wm_level *wm = &merged->wm[level];
3235
3236 ilk_merge_wm_level(dev, level, wm);
3237
3238 if (level > last_enabled_level)
3239 wm->enable = false;
3240 else if (!ilk_validate_wm_level(level, max, wm))
3241 /* make sure all following levels get disabled */
3242 last_enabled_level = level - 1;
3243
3244 /*
3245 * The spec says it is preferred to disable
3246 * FBC WMs instead of disabling a WM level.
3247 */
3248 if (wm->fbc_val > max->fbc) {
3249 if (wm->enable)
3250 merged->fbc_wm_enabled = false;
3251 wm->fbc_val = 0;
3252 }
3253 }
3254
3255 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3256 /*
3257 * FIXME this is racy. FBC might get enabled later.
3258 * What we should check here is whether FBC can be
3259 * enabled sometime later.
3260 */
3261 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
3262 intel_fbc_is_active(dev_priv)) {
3263 for (level = 2; level <= max_level; level++) {
3264 struct intel_wm_level *wm = &merged->wm[level];
3265
3266 wm->enable = false;
3267 }
3268 }
3269 }
3270
3271 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3272 {
3273 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3274 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3275 }
3276
3277 /* The value we need to program into the WM_LPx latency field */
3278 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
3279 {
3280 struct drm_i915_private *dev_priv = to_i915(dev);
3281
3282 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3283 return 2 * level;
3284 else
3285 return dev_priv->wm.pri_latency[level];
3286 }
3287
3288 static void ilk_compute_wm_results(struct drm_device *dev,
3289 const struct intel_pipe_wm *merged,
3290 enum intel_ddb_partitioning partitioning,
3291 struct ilk_wm_values *results)
3292 {
3293 struct drm_i915_private *dev_priv = to_i915(dev);
3294 struct intel_crtc *intel_crtc;
3295 int level, wm_lp;
3296
3297 results->enable_fbc_wm = merged->fbc_wm_enabled;
3298 results->partitioning = partitioning;
3299
3300 /* LP1+ register values */
3301 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3302 const struct intel_wm_level *r;
3303
3304 level = ilk_wm_lp_to_level(wm_lp, merged);
3305
3306 r = &merged->wm[level];
3307
3308 /*
3309 * Maintain the watermark values even if the level is
3310 * disabled. Doing otherwise could cause underruns.
3311 */
3312 results->wm_lp[wm_lp - 1] =
3313 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
3314 (r->pri_val << WM1_LP_SR_SHIFT) |
3315 r->cur_val;
3316
3317 if (r->enable)
3318 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3319
3320 if (INTEL_GEN(dev_priv) >= 8)
3321 results->wm_lp[wm_lp - 1] |=
3322 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3323 else
3324 results->wm_lp[wm_lp - 1] |=
3325 r->fbc_val << WM1_LP_FBC_SHIFT;
3326
3327 /*
3328 * Always set WM1S_LP_EN when spr_val != 0, even if the
3329 * level is disabled. Doing otherwise could cause underruns.
3330 */
3331 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3332 WARN_ON(wm_lp != 1);
3333 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3334 } else
3335 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3336 }
3337
3338 /* LP0 register values */
3339 for_each_intel_crtc(dev, intel_crtc) {
3340 enum pipe pipe = intel_crtc->pipe;
3341 const struct intel_wm_level *r =
3342 &intel_crtc->wm.active.ilk.wm[0];
3343
3344 if (WARN_ON(!r->enable))
3345 continue;
3346
3347 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3348
3349 results->wm_pipe[pipe] =
3350 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3351 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3352 r->cur_val;
3353 }
3354 }
3355
3356 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
3357 * case both are at the same level. Prefer r1 in case they're the same. */
3358 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
3359 struct intel_pipe_wm *r1,
3360 struct intel_pipe_wm *r2)
3361 {
3362 int level, max_level = ilk_wm_max_level(to_i915(dev));
3363 int level1 = 0, level2 = 0;
3364
3365 for (level = 1; level <= max_level; level++) {
3366 if (r1->wm[level].enable)
3367 level1 = level;
3368 if (r2->wm[level].enable)
3369 level2 = level;
3370 }
3371
3372 if (level1 == level2) {
3373 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3374 return r2;
3375 else
3376 return r1;
3377 } else if (level1 > level2) {
3378 return r1;
3379 } else {
3380 return r2;
3381 }
3382 }
3383
3384 /* dirty bits used to track which watermarks need changes */
3385 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3386 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
3387 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3388 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3389 #define WM_DIRTY_FBC (1 << 24)
3390 #define WM_DIRTY_DDB (1 << 25)
3391
3392 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3393 const struct ilk_wm_values *old,
3394 const struct ilk_wm_values *new)
3395 {
3396 unsigned int dirty = 0;
3397 enum pipe pipe;
3398 int wm_lp;
3399
3400 for_each_pipe(dev_priv, pipe) {
3401 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
3402 dirty |= WM_DIRTY_LINETIME(pipe);
3403 /* Must disable LP1+ watermarks too */
3404 dirty |= WM_DIRTY_LP_ALL;
3405 }
3406
3407 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3408 dirty |= WM_DIRTY_PIPE(pipe);
3409 /* Must disable LP1+ watermarks too */
3410 dirty |= WM_DIRTY_LP_ALL;
3411 }
3412 }
3413
3414 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3415 dirty |= WM_DIRTY_FBC;
3416 /* Must disable LP1+ watermarks too */
3417 dirty |= WM_DIRTY_LP_ALL;
3418 }
3419
3420 if (old->partitioning != new->partitioning) {
3421 dirty |= WM_DIRTY_DDB;
3422 /* Must disable LP1+ watermarks too */
3423 dirty |= WM_DIRTY_LP_ALL;
3424 }
3425
3426 /* LP1+ watermarks already deemed dirty, no need to continue */
3427 if (dirty & WM_DIRTY_LP_ALL)
3428 return dirty;
3429
3430 /* Find the lowest numbered LP1+ watermark in need of an update... */
3431 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3432 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3433 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3434 break;
3435 }
3436
3437 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3438 for (; wm_lp <= 3; wm_lp++)
3439 dirty |= WM_DIRTY_LP(wm_lp);
3440
3441 return dirty;
3442 }
3443
3444 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3445 unsigned int dirty)
3446 {
3447 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3448 bool changed = false;
3449
3450 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3451 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3452 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3453 changed = true;
3454 }
3455 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3456 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3457 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3458 changed = true;
3459 }
3460 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3461 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3462 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3463 changed = true;
3464 }
3465
3466 /*
3467 * Don't touch WM1S_LP_EN here.
3468 * Doing so could cause underruns.
3469 */
3470
3471 return changed;
3472 }
3473
3474 /*
3475 * The spec says we shouldn't write when we don't need, because every write
3476 * causes WMs to be re-evaluated, expending some power.
3477 */
3478 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3479 struct ilk_wm_values *results)
3480 {
3481 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3482 unsigned int dirty;
3483 uint32_t val;
3484
3485 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3486 if (!dirty)
3487 return;
3488
3489 _ilk_disable_lp_wm(dev_priv, dirty);
3490
3491 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3492 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3493 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3494 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3495 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3496 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3497
3498 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3499 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3500 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3501 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3502 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3503 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3504
3505 if (dirty & WM_DIRTY_DDB) {
3506 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3507 val = I915_READ(WM_MISC);
3508 if (results->partitioning == INTEL_DDB_PART_1_2)
3509 val &= ~WM_MISC_DATA_PARTITION_5_6;
3510 else
3511 val |= WM_MISC_DATA_PARTITION_5_6;
3512 I915_WRITE(WM_MISC, val);
3513 } else {
3514 val = I915_READ(DISP_ARB_CTL2);
3515 if (results->partitioning == INTEL_DDB_PART_1_2)
3516 val &= ~DISP_DATA_PARTITION_5_6;
3517 else
3518 val |= DISP_DATA_PARTITION_5_6;
3519 I915_WRITE(DISP_ARB_CTL2, val);
3520 }
3521 }
3522
3523 if (dirty & WM_DIRTY_FBC) {
3524 val = I915_READ(DISP_ARB_CTL);
3525 if (results->enable_fbc_wm)
3526 val &= ~DISP_FBC_WM_DIS;
3527 else
3528 val |= DISP_FBC_WM_DIS;
3529 I915_WRITE(DISP_ARB_CTL, val);
3530 }
3531
3532 if (dirty & WM_DIRTY_LP(1) &&
3533 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3534 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3535
3536 if (INTEL_GEN(dev_priv) >= 7) {
3537 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3538 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3539 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3540 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3541 }
3542
3543 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3544 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3545 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3546 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3547 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3548 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3549
3550 dev_priv->wm.hw = *results;
3551 }
3552
3553 bool ilk_disable_lp_wm(struct drm_device *dev)
3554 {
3555 struct drm_i915_private *dev_priv = to_i915(dev);
3556
3557 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3558 }
3559
3560 /*
3561 * FIXME: We still don't have the proper code detect if we need to apply the WA,
3562 * so assume we'll always need it in order to avoid underruns.
3563 */
3564 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3565 {
3566 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3567
3568 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
3569 return true;
3570
3571 return false;
3572 }
3573
3574 static bool
3575 intel_has_sagv(struct drm_i915_private *dev_priv)
3576 {
3577 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
3578 IS_CANNONLAKE(dev_priv))
3579 return true;
3580
3581 if (IS_SKYLAKE(dev_priv) &&
3582 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
3583 return true;
3584
3585 return false;
3586 }
3587
3588 /*
3589 * SAGV dynamically adjusts the system agent voltage and clock frequencies
3590 * depending on power and performance requirements. The display engine access
3591 * to system memory is blocked during the adjustment time. Because of the
3592 * blocking time, having this enabled can cause full system hangs and/or pipe
3593 * underruns if we don't meet all of the following requirements:
3594 *
3595 * - <= 1 pipe enabled
3596 * - All planes can enable watermarks for latencies >= SAGV engine block time
3597 * - We're not using an interlaced display configuration
3598 */
3599 int
3600 intel_enable_sagv(struct drm_i915_private *dev_priv)
3601 {
3602 int ret;
3603
3604 if (!intel_has_sagv(dev_priv))
3605 return 0;
3606
3607 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3608 return 0;
3609
3610 DRM_DEBUG_KMS("Enabling the SAGV\n");
3611 mutex_lock(&dev_priv->rps.hw_lock);
3612
3613 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3614 GEN9_SAGV_ENABLE);
3615
3616 /* We don't need to wait for the SAGV when enabling */
3617 mutex_unlock(&dev_priv->rps.hw_lock);
3618
3619 /*
3620 * Some skl systems, pre-release machines in particular,
3621 * don't actually have an SAGV.
3622 */
3623 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3624 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3625 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3626 return 0;
3627 } else if (ret < 0) {
3628 DRM_ERROR("Failed to enable the SAGV\n");
3629 return ret;
3630 }
3631
3632 dev_priv->sagv_status = I915_SAGV_ENABLED;
3633 return 0;
3634 }
3635
3636 int
3637 intel_disable_sagv(struct drm_i915_private *dev_priv)
3638 {
3639 int ret;
3640
3641 if (!intel_has_sagv(dev_priv))
3642 return 0;
3643
3644 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3645 return 0;
3646
3647 DRM_DEBUG_KMS("Disabling the SAGV\n");
3648 mutex_lock(&dev_priv->rps.hw_lock);
3649
3650 /* bspec says to keep retrying for at least 1 ms */
3651 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3652 GEN9_SAGV_DISABLE,
3653 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3654 1);
3655 mutex_unlock(&dev_priv->rps.hw_lock);
3656
3657 /*
3658 * Some skl systems, pre-release machines in particular,
3659 * don't actually have an SAGV.
3660 */
3661 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3662 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3663 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3664 return 0;
3665 } else if (ret < 0) {
3666 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
3667 return ret;
3668 }
3669
3670 dev_priv->sagv_status = I915_SAGV_DISABLED;
3671 return 0;
3672 }
3673
3674 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3675 {
3676 struct drm_device *dev = state->dev;
3677 struct drm_i915_private *dev_priv = to_i915(dev);
3678 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3679 struct intel_crtc *crtc;
3680 struct intel_plane *plane;
3681 struct intel_crtc_state *cstate;
3682 enum pipe pipe;
3683 int level, latency;
3684 int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20;
3685
3686 if (!intel_has_sagv(dev_priv))
3687 return false;
3688
3689 /*
3690 * SKL+ workaround: bspec recommends we disable the SAGV when we have
3691 * more then one pipe enabled
3692 *
3693 * If there are no active CRTCs, no additional checks need be performed
3694 */
3695 if (hweight32(intel_state->active_crtcs) == 0)
3696 return true;
3697 else if (hweight32(intel_state->active_crtcs) > 1)
3698 return false;
3699
3700 /* Since we're now guaranteed to only have one active CRTC... */
3701 pipe = ffs(intel_state->active_crtcs) - 1;
3702 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3703 cstate = to_intel_crtc_state(crtc->base.state);
3704
3705 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3706 return false;
3707
3708 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3709 struct skl_plane_wm *wm =
3710 &cstate->wm.skl.optimal.planes[plane->id];
3711
3712 /* Skip this plane if it's not enabled */
3713 if (!wm->wm[0].plane_en)
3714 continue;
3715
3716 /* Find the highest enabled wm level for this plane */
3717 for (level = ilk_wm_max_level(dev_priv);
3718 !wm->wm[level].plane_en; --level)
3719 { }
3720
3721 latency = dev_priv->wm.skl_latency[level];
3722
3723 if (skl_needs_memory_bw_wa(intel_state) &&
3724 plane->base.state->fb->modifier ==
3725 I915_FORMAT_MOD_X_TILED)
3726 latency += 15;
3727
3728 /*
3729 * If any of the planes on this pipe don't enable wm levels that
3730 * incur memory latencies higher than sagv_block_time_us we
3731 * can't enable the SAGV.
3732 */
3733 if (latency < sagv_block_time_us)
3734 return false;
3735 }
3736
3737 return true;
3738 }
3739
3740 static void
3741 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3742 const struct intel_crtc_state *cstate,
3743 struct skl_ddb_entry *alloc, /* out */
3744 int *num_active /* out */)
3745 {
3746 struct drm_atomic_state *state = cstate->base.state;
3747 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3748 struct drm_i915_private *dev_priv = to_i915(dev);
3749 struct drm_crtc *for_crtc = cstate->base.crtc;
3750 unsigned int pipe_size, ddb_size;
3751 int nth_active_pipe;
3752
3753 if (WARN_ON(!state) || !cstate->base.active) {
3754 alloc->start = 0;
3755 alloc->end = 0;
3756 *num_active = hweight32(dev_priv->active_crtcs);
3757 return;
3758 }
3759
3760 if (intel_state->active_pipe_changes)
3761 *num_active = hweight32(intel_state->active_crtcs);
3762 else
3763 *num_active = hweight32(dev_priv->active_crtcs);
3764
3765 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3766 WARN_ON(ddb_size == 0);
3767
3768 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3769
3770 /*
3771 * If the state doesn't change the active CRTC's, then there's
3772 * no need to recalculate; the existing pipe allocation limits
3773 * should remain unchanged. Note that we're safe from racing
3774 * commits since any racing commit that changes the active CRTC
3775 * list would need to grab _all_ crtc locks, including the one
3776 * we currently hold.
3777 */
3778 if (!intel_state->active_pipe_changes) {
3779 /*
3780 * alloc may be cleared by clear_intel_crtc_state,
3781 * copy from old state to be sure
3782 */
3783 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3784 return;
3785 }
3786
3787 nth_active_pipe = hweight32(intel_state->active_crtcs &
3788 (drm_crtc_mask(for_crtc) - 1));
3789 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3790 alloc->start = nth_active_pipe * ddb_size / *num_active;
3791 alloc->end = alloc->start + pipe_size;
3792 }
3793
3794 static unsigned int skl_cursor_allocation(int num_active)
3795 {
3796 if (num_active == 1)
3797 return 32;
3798
3799 return 8;
3800 }
3801
3802 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3803 {
3804 entry->start = reg & 0x3ff;
3805 entry->end = (reg >> 16) & 0x3ff;
3806 if (entry->end)
3807 entry->end += 1;
3808 }
3809
3810 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3811 struct skl_ddb_allocation *ddb /* out */)
3812 {
3813 struct intel_crtc *crtc;
3814
3815 memset(ddb, 0, sizeof(*ddb));
3816
3817 for_each_intel_crtc(&dev_priv->drm, crtc) {
3818 enum intel_display_power_domain power_domain;
3819 enum plane_id plane_id;
3820 enum pipe pipe = crtc->pipe;
3821
3822 power_domain = POWER_DOMAIN_PIPE(pipe);
3823 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3824 continue;
3825
3826 for_each_plane_id_on_crtc(crtc, plane_id) {
3827 u32 val;
3828
3829 if (plane_id != PLANE_CURSOR)
3830 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3831 else
3832 val = I915_READ(CUR_BUF_CFG(pipe));
3833
3834 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
3835 }
3836
3837 intel_display_power_put(dev_priv, power_domain);
3838 }
3839 }
3840
3841 /*
3842 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3843 * The bspec defines downscale amount as:
3844 *
3845 * """
3846 * Horizontal down scale amount = maximum[1, Horizontal source size /
3847 * Horizontal destination size]
3848 * Vertical down scale amount = maximum[1, Vertical source size /
3849 * Vertical destination size]
3850 * Total down scale amount = Horizontal down scale amount *
3851 * Vertical down scale amount
3852 * """
3853 *
3854 * Return value is provided in 16.16 fixed point form to retain fractional part.
3855 * Caller should take care of dividing & rounding off the value.
3856 */
3857 static uint_fixed_16_16_t
3858 skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3859 const struct intel_plane_state *pstate)
3860 {
3861 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
3862 uint32_t src_w, src_h, dst_w, dst_h;
3863 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
3864 uint_fixed_16_16_t downscale_h, downscale_w;
3865
3866 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3867 return u32_to_fixed16(0);
3868
3869 /* n.b., src is 16.16 fixed point, dst is whole integer */
3870 if (plane->id == PLANE_CURSOR) {
3871 /*
3872 * Cursors only support 0/180 degree rotation,
3873 * hence no need to account for rotation here.
3874 */
3875 src_w = pstate->base.src_w >> 16;
3876 src_h = pstate->base.src_h >> 16;
3877 dst_w = pstate->base.crtc_w;
3878 dst_h = pstate->base.crtc_h;
3879 } else {
3880 /*
3881 * Src coordinates are already rotated by 270 degrees for
3882 * the 90/270 degree plane rotation cases (to match the
3883 * GTT mapping), hence no need to account for rotation here.
3884 */
3885 src_w = drm_rect_width(&pstate->base.src) >> 16;
3886 src_h = drm_rect_height(&pstate->base.src) >> 16;
3887 dst_w = drm_rect_width(&pstate->base.dst);
3888 dst_h = drm_rect_height(&pstate->base.dst);
3889 }
3890
3891 fp_w_ratio = div_fixed16(src_w, dst_w);
3892 fp_h_ratio = div_fixed16(src_h, dst_h);
3893 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
3894 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
3895
3896 return mul_fixed16(downscale_w, downscale_h);
3897 }
3898
3899 static uint_fixed_16_16_t
3900 skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
3901 {
3902 uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
3903
3904 if (!crtc_state->base.enable)
3905 return pipe_downscale;
3906
3907 if (crtc_state->pch_pfit.enabled) {
3908 uint32_t src_w, src_h, dst_w, dst_h;
3909 uint32_t pfit_size = crtc_state->pch_pfit.size;
3910 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
3911 uint_fixed_16_16_t downscale_h, downscale_w;
3912
3913 src_w = crtc_state->pipe_src_w;
3914 src_h = crtc_state->pipe_src_h;
3915 dst_w = pfit_size >> 16;
3916 dst_h = pfit_size & 0xffff;
3917
3918 if (!dst_w || !dst_h)
3919 return pipe_downscale;
3920
3921 fp_w_ratio = div_fixed16(src_w, dst_w);
3922 fp_h_ratio = div_fixed16(src_h, dst_h);
3923 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
3924 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
3925
3926 pipe_downscale = mul_fixed16(downscale_w, downscale_h);
3927 }
3928
3929 return pipe_downscale;
3930 }
3931
3932 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
3933 struct intel_crtc_state *cstate)
3934 {
3935 struct drm_crtc_state *crtc_state = &cstate->base;
3936 struct drm_atomic_state *state = crtc_state->state;
3937 struct drm_plane *plane;
3938 const struct drm_plane_state *pstate;
3939 struct intel_plane_state *intel_pstate;
3940 int crtc_clock, dotclk;
3941 uint32_t pipe_max_pixel_rate;
3942 uint_fixed_16_16_t pipe_downscale;
3943 uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
3944
3945 if (!cstate->base.enable)
3946 return 0;
3947
3948 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
3949 uint_fixed_16_16_t plane_downscale;
3950 uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
3951 int bpp;
3952
3953 if (!intel_wm_plane_visible(cstate,
3954 to_intel_plane_state(pstate)))
3955 continue;
3956
3957 if (WARN_ON(!pstate->fb))
3958 return -EINVAL;
3959
3960 intel_pstate = to_intel_plane_state(pstate);
3961 plane_downscale = skl_plane_downscale_amount(cstate,
3962 intel_pstate);
3963 bpp = pstate->fb->format->cpp[0] * 8;
3964 if (bpp == 64)
3965 plane_downscale = mul_fixed16(plane_downscale,
3966 fp_9_div_8);
3967
3968 max_downscale = max_fixed16(plane_downscale, max_downscale);
3969 }
3970 pipe_downscale = skl_pipe_downscale_amount(cstate);
3971
3972 pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
3973
3974 crtc_clock = crtc_state->adjusted_mode.crtc_clock;
3975 dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
3976
3977 if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
3978 dotclk *= 2;
3979
3980 pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
3981
3982 if (pipe_max_pixel_rate < crtc_clock) {
3983 DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
3984 return -EINVAL;
3985 }
3986
3987 return 0;
3988 }
3989
3990 static unsigned int
3991 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3992 const struct drm_plane_state *pstate,
3993 int y)
3994 {
3995 struct intel_plane *plane = to_intel_plane(pstate->plane);
3996 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3997 uint32_t data_rate;
3998 uint32_t width = 0, height = 0;
3999 struct drm_framebuffer *fb;
4000 u32 format;
4001 uint_fixed_16_16_t down_scale_amount;
4002
4003 if (!intel_pstate->base.visible)
4004 return 0;
4005
4006 fb = pstate->fb;
4007 format = fb->format->format;
4008
4009 if (plane->id == PLANE_CURSOR)
4010 return 0;
4011 if (y && format != DRM_FORMAT_NV12)
4012 return 0;
4013
4014 /*
4015 * Src coordinates are already rotated by 270 degrees for
4016 * the 90/270 degree plane rotation cases (to match the
4017 * GTT mapping), hence no need to account for rotation here.
4018 */
4019 width = drm_rect_width(&intel_pstate->base.src) >> 16;
4020 height = drm_rect_height(&intel_pstate->base.src) >> 16;
4021
4022 /* for planar format */
4023 if (format == DRM_FORMAT_NV12) {
4024 if (y) /* y-plane data rate */
4025 data_rate = width * height *
4026 fb->format->cpp[0];
4027 else /* uv-plane data rate */
4028 data_rate = (width / 2) * (height / 2) *
4029 fb->format->cpp[1];
4030 } else {
4031 /* for packed formats */
4032 data_rate = width * height * fb->format->cpp[0];
4033 }
4034
4035 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
4036
4037 return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4038 }
4039
4040 /*
4041 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
4042 * a 8192x4096@32bpp framebuffer:
4043 * 3 * 4096 * 8192 * 4 < 2^32
4044 */
4045 static unsigned int
4046 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4047 unsigned *plane_data_rate,
4048 unsigned *plane_y_data_rate)
4049 {
4050 struct drm_crtc_state *cstate = &intel_cstate->base;
4051 struct drm_atomic_state *state = cstate->state;
4052 struct drm_plane *plane;
4053 const struct drm_plane_state *pstate;
4054 unsigned int total_data_rate = 0;
4055
4056 if (WARN_ON(!state))
4057 return 0;
4058
4059 /* Calculate and cache data rate for each plane */
4060 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4061 enum plane_id plane_id = to_intel_plane(plane)->id;
4062 unsigned int rate;
4063
4064 /* packed/uv */
4065 rate = skl_plane_relative_data_rate(intel_cstate,
4066 pstate, 0);
4067 plane_data_rate[plane_id] = rate;
4068
4069 total_data_rate += rate;
4070
4071 /* y-plane */
4072 rate = skl_plane_relative_data_rate(intel_cstate,
4073 pstate, 1);
4074 plane_y_data_rate[plane_id] = rate;
4075
4076 total_data_rate += rate;
4077 }
4078
4079 return total_data_rate;
4080 }
4081
4082 static uint16_t
4083 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
4084 const int y)
4085 {
4086 struct drm_framebuffer *fb = pstate->fb;
4087 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
4088 uint32_t src_w, src_h;
4089 uint32_t min_scanlines = 8;
4090 uint8_t plane_bpp;
4091
4092 if (WARN_ON(!fb))
4093 return 0;
4094
4095 /* For packed formats, no y-plane, return 0 */
4096 if (y && fb->format->format != DRM_FORMAT_NV12)
4097 return 0;
4098
4099 /* For Non Y-tile return 8-blocks */
4100 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
4101 fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
4102 fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
4103 fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
4104 return 8;
4105
4106 /*
4107 * Src coordinates are already rotated by 270 degrees for
4108 * the 90/270 degree plane rotation cases (to match the
4109 * GTT mapping), hence no need to account for rotation here.
4110 */
4111 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
4112 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
4113
4114 /* Halve UV plane width and height for NV12 */
4115 if (fb->format->format == DRM_FORMAT_NV12 && !y) {
4116 src_w /= 2;
4117 src_h /= 2;
4118 }
4119
4120 if (fb->format->format == DRM_FORMAT_NV12 && !y)
4121 plane_bpp = fb->format->cpp[1];
4122 else
4123 plane_bpp = fb->format->cpp[0];
4124
4125 if (drm_rotation_90_or_270(pstate->rotation)) {
4126 switch (plane_bpp) {
4127 case 1:
4128 min_scanlines = 32;
4129 break;
4130 case 2:
4131 min_scanlines = 16;
4132 break;
4133 case 4:
4134 min_scanlines = 8;
4135 break;
4136 case 8:
4137 min_scanlines = 4;
4138 break;
4139 default:
4140 WARN(1, "Unsupported pixel depth %u for rotation",
4141 plane_bpp);
4142 min_scanlines = 32;
4143 }
4144 }
4145
4146 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
4147 }
4148
4149 static void
4150 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
4151 uint16_t *minimum, uint16_t *y_minimum)
4152 {
4153 const struct drm_plane_state *pstate;
4154 struct drm_plane *plane;
4155
4156 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
4157 enum plane_id plane_id = to_intel_plane(plane)->id;
4158
4159 if (plane_id == PLANE_CURSOR)
4160 continue;
4161
4162 if (!pstate->visible)
4163 continue;
4164
4165 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4166 y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4167 }
4168
4169 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
4170 }
4171
4172 static int
4173 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4174 struct skl_ddb_allocation *ddb /* out */)
4175 {
4176 struct drm_atomic_state *state = cstate->base.state;
4177 struct drm_crtc *crtc = cstate->base.crtc;
4178 struct drm_device *dev = crtc->dev;
4179 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4180 enum pipe pipe = intel_crtc->pipe;
4181 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4182 uint16_t alloc_size, start;
4183 uint16_t minimum[I915_MAX_PLANES] = {};
4184 uint16_t y_minimum[I915_MAX_PLANES] = {};
4185 unsigned int total_data_rate;
4186 enum plane_id plane_id;
4187 int num_active;
4188 unsigned plane_data_rate[I915_MAX_PLANES] = {};
4189 unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
4190 uint16_t total_min_blocks = 0;
4191
4192 /* Clear the partitioning for disabled planes. */
4193 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
4194 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
4195
4196 if (WARN_ON(!state))
4197 return 0;
4198
4199 if (!cstate->base.active) {
4200 alloc->start = alloc->end = 0;
4201 return 0;
4202 }
4203
4204 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
4205 alloc_size = skl_ddb_entry_size(alloc);
4206 if (alloc_size == 0)
4207 return 0;
4208
4209 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
4210
4211 /*
4212 * 1. Allocate the mininum required blocks for each active plane
4213 * and allocate the cursor, it doesn't require extra allocation
4214 * proportional to the data rate.
4215 */
4216
4217 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4218 total_min_blocks += minimum[plane_id];
4219 total_min_blocks += y_minimum[plane_id];
4220 }
4221
4222 if (total_min_blocks > alloc_size) {
4223 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
4224 DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
4225 alloc_size);
4226 return -EINVAL;
4227 }
4228
4229 alloc_size -= total_min_blocks;
4230 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
4231 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
4232
4233 /*
4234 * 2. Distribute the remaining space in proportion to the amount of
4235 * data each plane needs to fetch from memory.
4236 *
4237 * FIXME: we may not allocate every single block here.
4238 */
4239 total_data_rate = skl_get_total_relative_data_rate(cstate,
4240 plane_data_rate,
4241 plane_y_data_rate);
4242 if (total_data_rate == 0)
4243 return 0;
4244
4245 start = alloc->start;
4246 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4247 unsigned int data_rate, y_data_rate;
4248 uint16_t plane_blocks, y_plane_blocks = 0;
4249
4250 if (plane_id == PLANE_CURSOR)
4251 continue;
4252
4253 data_rate = plane_data_rate[plane_id];
4254
4255 /*
4256 * allocation for (packed formats) or (uv-plane part of planar format):
4257 * promote the expression to 64 bits to avoid overflowing, the
4258 * result is < available as data_rate / total_data_rate < 1
4259 */
4260 plane_blocks = minimum[plane_id];
4261 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
4262 total_data_rate);
4263
4264 /* Leave disabled planes at (0,0) */
4265 if (data_rate) {
4266 ddb->plane[pipe][plane_id].start = start;
4267 ddb->plane[pipe][plane_id].end = start + plane_blocks;
4268 }
4269
4270 start += plane_blocks;
4271
4272 /*
4273 * allocation for y_plane part of planar format:
4274 */
4275 y_data_rate = plane_y_data_rate[plane_id];
4276
4277 y_plane_blocks = y_minimum[plane_id];
4278 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
4279 total_data_rate);
4280
4281 if (y_data_rate) {
4282 ddb->y_plane[pipe][plane_id].start = start;
4283 ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
4284 }
4285
4286 start += y_plane_blocks;
4287 }
4288
4289 return 0;
4290 }
4291
4292 /*
4293 * The max latency should be 257 (max the punit can code is 255 and we add 2us
4294 * for the read latency) and cpp should always be <= 8, so that
4295 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
4296 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
4297 */
4298 static uint_fixed_16_16_t
4299 skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
4300 uint8_t cpp, uint32_t latency)
4301 {
4302 uint32_t wm_intermediate_val;
4303 uint_fixed_16_16_t ret;
4304
4305 if (latency == 0)
4306 return FP_16_16_MAX;
4307
4308 wm_intermediate_val = latency * pixel_rate * cpp;
4309 ret = div_fixed16(wm_intermediate_val, 1000 * 512);
4310
4311 if (INTEL_GEN(dev_priv) >= 10)
4312 ret = add_fixed16_u32(ret, 1);
4313
4314 return ret;
4315 }
4316
4317 static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
4318 uint32_t pipe_htotal,
4319 uint32_t latency,
4320 uint_fixed_16_16_t plane_blocks_per_line)
4321 {
4322 uint32_t wm_intermediate_val;
4323 uint_fixed_16_16_t ret;
4324
4325 if (latency == 0)
4326 return FP_16_16_MAX;
4327
4328 wm_intermediate_val = latency * pixel_rate;
4329 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4330 pipe_htotal * 1000);
4331 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4332 return ret;
4333 }
4334
4335 static uint_fixed_16_16_t
4336 intel_get_linetime_us(struct intel_crtc_state *cstate)
4337 {
4338 uint32_t pixel_rate;
4339 uint32_t crtc_htotal;
4340 uint_fixed_16_16_t linetime_us;
4341
4342 if (!cstate->base.active)
4343 return u32_to_fixed16(0);
4344
4345 pixel_rate = cstate->pixel_rate;
4346
4347 if (WARN_ON(pixel_rate == 0))
4348 return u32_to_fixed16(0);
4349
4350 crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
4351 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
4352
4353 return linetime_us;
4354 }
4355
4356 static uint32_t
4357 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4358 const struct intel_plane_state *pstate)
4359 {
4360 uint64_t adjusted_pixel_rate;
4361 uint_fixed_16_16_t downscale_amount;
4362
4363 /* Shouldn't reach here on disabled planes... */
4364 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
4365 return 0;
4366
4367 /*
4368 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
4369 * with additional adjustments for plane-specific scaling.
4370 */
4371 adjusted_pixel_rate = cstate->pixel_rate;
4372 downscale_amount = skl_plane_downscale_amount(cstate, pstate);
4373
4374 return mul_round_up_u32_fixed16(adjusted_pixel_rate,
4375 downscale_amount);
4376 }
4377
4378 static int
4379 skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
4380 struct intel_crtc_state *cstate,
4381 const struct intel_plane_state *intel_pstate,
4382 struct skl_wm_params *wp)
4383 {
4384 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
4385 const struct drm_plane_state *pstate = &intel_pstate->base;
4386 const struct drm_framebuffer *fb = pstate->fb;
4387 uint32_t interm_pbpl;
4388 struct intel_atomic_state *state =
4389 to_intel_atomic_state(cstate->base.state);
4390 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
4391
4392 if (!intel_wm_plane_visible(cstate, intel_pstate))
4393 return 0;
4394
4395 wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
4396 fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
4397 fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4398 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4399 wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
4400 wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4401 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4402
4403 if (plane->id == PLANE_CURSOR) {
4404 wp->width = intel_pstate->base.crtc_w;
4405 } else {
4406 /*
4407 * Src coordinates are already rotated by 270 degrees for
4408 * the 90/270 degree plane rotation cases (to match the
4409 * GTT mapping), hence no need to account for rotation here.
4410 */
4411 wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
4412 }
4413
4414 wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
4415 fb->format->cpp[0];
4416 wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
4417 intel_pstate);
4418
4419 if (drm_rotation_90_or_270(pstate->rotation)) {
4420
4421 switch (wp->cpp) {
4422 case 1:
4423 wp->y_min_scanlines = 16;
4424 break;
4425 case 2:
4426 wp->y_min_scanlines = 8;
4427 break;
4428 case 4:
4429 wp->y_min_scanlines = 4;
4430 break;
4431 default:
4432 MISSING_CASE(wp->cpp);
4433 return -EINVAL;
4434 }
4435 } else {
4436 wp->y_min_scanlines = 4;
4437 }
4438
4439 if (apply_memory_bw_wa)
4440 wp->y_min_scanlines *= 2;
4441
4442 wp->plane_bytes_per_line = wp->width * wp->cpp;
4443 if (wp->y_tiled) {
4444 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
4445 wp->y_min_scanlines, 512);
4446
4447 if (INTEL_GEN(dev_priv) >= 10)
4448 interm_pbpl++;
4449
4450 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4451 wp->y_min_scanlines);
4452 } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
4453 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512);
4454 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4455 } else {
4456 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1;
4457 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4458 }
4459
4460 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
4461 wp->plane_blocks_per_line);
4462 wp->linetime_us = fixed16_to_u32_round_up(
4463 intel_get_linetime_us(cstate));
4464
4465 return 0;
4466 }
4467
4468 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4469 struct intel_crtc_state *cstate,
4470 const struct intel_plane_state *intel_pstate,
4471 uint16_t ddb_allocation,
4472 int level,
4473 const struct skl_wm_params *wp,
4474 uint16_t *out_blocks, /* out */
4475 uint8_t *out_lines, /* out */
4476 bool *enabled /* out */)
4477 {
4478 const struct drm_plane_state *pstate = &intel_pstate->base;
4479 uint32_t latency = dev_priv->wm.skl_latency[level];
4480 uint_fixed_16_16_t method1, method2;
4481 uint_fixed_16_16_t selected_result;
4482 uint32_t res_blocks, res_lines;
4483 struct intel_atomic_state *state =
4484 to_intel_atomic_state(cstate->base.state);
4485 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
4486
4487 if (latency == 0 ||
4488 !intel_wm_plane_visible(cstate, intel_pstate)) {
4489 *enabled = false;
4490 return 0;
4491 }
4492
4493 /* Display WA #1141: kbl,cfl */
4494 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
4495 IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
4496 dev_priv->ipc_enabled)
4497 latency += 4;
4498
4499 if (apply_memory_bw_wa && wp->x_tiled)
4500 latency += 15;
4501
4502 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
4503 wp->cpp, latency);
4504 method2 = skl_wm_method2(wp->plane_pixel_rate,
4505 cstate->base.adjusted_mode.crtc_htotal,
4506 latency,
4507 wp->plane_blocks_per_line);
4508
4509 if (wp->y_tiled) {
4510 selected_result = max_fixed16(method2, wp->y_tile_minimum);
4511 } else {
4512 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
4513 512 < 1) && (wp->plane_bytes_per_line / 512 < 1))
4514 selected_result = method2;
4515 else if (ddb_allocation >=
4516 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
4517 selected_result = min_fixed16(method1, method2);
4518 else if (latency >= wp->linetime_us)
4519 selected_result = min_fixed16(method1, method2);
4520 else
4521 selected_result = method1;
4522 }
4523
4524 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
4525 res_lines = div_round_up_fixed16(selected_result,
4526 wp->plane_blocks_per_line);
4527
4528 /* Display WA #1125: skl,bxt,kbl,glk */
4529 if (level == 0 && wp->rc_surface)
4530 res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
4531
4532 /* Display WA #1126: skl,bxt,kbl,glk */
4533 if (level >= 1 && level <= 7) {
4534 if (wp->y_tiled) {
4535 res_blocks += fixed16_to_u32_round_up(
4536 wp->y_tile_minimum);
4537 res_lines += wp->y_min_scanlines;
4538 } else {
4539 res_blocks++;
4540 }
4541 }
4542
4543 if (res_blocks >= ddb_allocation || res_lines > 31) {
4544 *enabled = false;
4545
4546 /*
4547 * If there are no valid level 0 watermarks, then we can't
4548 * support this display configuration.
4549 */
4550 if (level) {
4551 return 0;
4552 } else {
4553 struct drm_plane *plane = pstate->plane;
4554
4555 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
4556 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
4557 plane->base.id, plane->name,
4558 res_blocks, ddb_allocation, res_lines);
4559 return -EINVAL;
4560 }
4561 }
4562
4563 *out_blocks = res_blocks;
4564 *out_lines = res_lines;
4565 *enabled = true;
4566
4567 return 0;
4568 }
4569
4570 static int
4571 skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4572 struct skl_ddb_allocation *ddb,
4573 struct intel_crtc_state *cstate,
4574 const struct intel_plane_state *intel_pstate,
4575 const struct skl_wm_params *wm_params,
4576 struct skl_plane_wm *wm)
4577 {
4578 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4579 struct drm_plane *plane = intel_pstate->base.plane;
4580 struct intel_plane *intel_plane = to_intel_plane(plane);
4581 uint16_t ddb_blocks;
4582 enum pipe pipe = intel_crtc->pipe;
4583 int level, max_level = ilk_wm_max_level(dev_priv);
4584 int ret;
4585
4586 if (WARN_ON(!intel_pstate->base.fb))
4587 return -EINVAL;
4588
4589 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
4590
4591 for (level = 0; level <= max_level; level++) {
4592 struct skl_wm_level *result = &wm->wm[level];
4593
4594 ret = skl_compute_plane_wm(dev_priv,
4595 cstate,
4596 intel_pstate,
4597 ddb_blocks,
4598 level,
4599 wm_params,
4600 &result->plane_res_b,
4601 &result->plane_res_l,
4602 &result->plane_en);
4603 if (ret)
4604 return ret;
4605 }
4606
4607 return 0;
4608 }
4609
4610 static uint32_t
4611 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
4612 {
4613 struct drm_atomic_state *state = cstate->base.state;
4614 struct drm_i915_private *dev_priv = to_i915(state->dev);
4615 uint_fixed_16_16_t linetime_us;
4616 uint32_t linetime_wm;
4617
4618 linetime_us = intel_get_linetime_us(cstate);
4619
4620 if (is_fixed16_zero(linetime_us))
4621 return 0;
4622
4623 linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
4624
4625 /* Display WA #1135: bxt:ALL GLK:ALL */
4626 if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
4627 dev_priv->ipc_enabled)
4628 linetime_wm /= 2;
4629
4630 return linetime_wm;
4631 }
4632
4633 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4634 struct skl_wm_params *wp,
4635 struct skl_wm_level *wm_l0,
4636 uint16_t ddb_allocation,
4637 struct skl_wm_level *trans_wm /* out */)
4638 {
4639 struct drm_device *dev = cstate->base.crtc->dev;
4640 const struct drm_i915_private *dev_priv = to_i915(dev);
4641 uint16_t trans_min, trans_y_tile_min;
4642 const uint16_t trans_amount = 10; /* This is configurable amount */
4643 uint16_t trans_offset_b, res_blocks;
4644
4645 if (!cstate->base.active)
4646 goto exit;
4647
4648 /* Transition WM are not recommended by HW team for GEN9 */
4649 if (INTEL_GEN(dev_priv) <= 9)
4650 goto exit;
4651
4652 /* Transition WM don't make any sense if ipc is disabled */
4653 if (!dev_priv->ipc_enabled)
4654 goto exit;
4655
4656 if (INTEL_GEN(dev_priv) >= 10)
4657 trans_min = 4;
4658
4659 trans_offset_b = trans_min + trans_amount;
4660
4661 if (wp->y_tiled) {
4662 trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
4663 wp->y_tile_minimum);
4664 res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
4665 trans_offset_b;
4666 } else {
4667 res_blocks = wm_l0->plane_res_b + trans_offset_b;
4668
4669 /* WA BUG:1938466 add one block for non y-tile planes */
4670 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
4671 res_blocks += 1;
4672
4673 }
4674
4675 res_blocks += 1;
4676
4677 if (res_blocks < ddb_allocation) {
4678 trans_wm->plane_res_b = res_blocks;
4679 trans_wm->plane_en = true;
4680 return;
4681 }
4682
4683 exit:
4684 trans_wm->plane_en = false;
4685 }
4686
4687 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4688 struct skl_ddb_allocation *ddb,
4689 struct skl_pipe_wm *pipe_wm)
4690 {
4691 struct drm_device *dev = cstate->base.crtc->dev;
4692 struct drm_crtc_state *crtc_state = &cstate->base;
4693 const struct drm_i915_private *dev_priv = to_i915(dev);
4694 struct drm_plane *plane;
4695 const struct drm_plane_state *pstate;
4696 struct skl_plane_wm *wm;
4697 int ret;
4698
4699 /*
4700 * We'll only calculate watermarks for planes that are actually
4701 * enabled, so make sure all other planes are set as disabled.
4702 */
4703 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
4704
4705 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
4706 const struct intel_plane_state *intel_pstate =
4707 to_intel_plane_state(pstate);
4708 enum plane_id plane_id = to_intel_plane(plane)->id;
4709 struct skl_wm_params wm_params;
4710 enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
4711 uint16_t ddb_blocks;
4712
4713 wm = &pipe_wm->planes[plane_id];
4714 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
4715 memset(&wm_params, 0, sizeof(struct skl_wm_params));
4716
4717 ret = skl_compute_plane_wm_params(dev_priv, cstate,
4718 intel_pstate, &wm_params);
4719 if (ret)
4720 return ret;
4721
4722 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4723 intel_pstate, &wm_params, wm);
4724 if (ret)
4725 return ret;
4726 skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
4727 ddb_blocks, &wm->trans_wm);
4728 }
4729 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
4730
4731 return 0;
4732 }
4733
4734 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
4735 i915_reg_t reg,
4736 const struct skl_ddb_entry *entry)
4737 {
4738 if (entry->end)
4739 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
4740 else
4741 I915_WRITE(reg, 0);
4742 }
4743
4744 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
4745 i915_reg_t reg,
4746 const struct skl_wm_level *level)
4747 {
4748 uint32_t val = 0;
4749
4750 if (level->plane_en) {
4751 val |= PLANE_WM_EN;
4752 val |= level->plane_res_b;
4753 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
4754 }
4755
4756 I915_WRITE(reg, val);
4757 }
4758
4759 static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
4760 const struct skl_plane_wm *wm,
4761 const struct skl_ddb_allocation *ddb,
4762 enum plane_id plane_id)
4763 {
4764 struct drm_crtc *crtc = &intel_crtc->base;
4765 struct drm_device *dev = crtc->dev;
4766 struct drm_i915_private *dev_priv = to_i915(dev);
4767 int level, max_level = ilk_wm_max_level(dev_priv);
4768 enum pipe pipe = intel_crtc->pipe;
4769
4770 for (level = 0; level <= max_level; level++) {
4771 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
4772 &wm->wm[level]);
4773 }
4774 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
4775 &wm->trans_wm);
4776
4777 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
4778 &ddb->plane[pipe][plane_id]);
4779 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
4780 &ddb->y_plane[pipe][plane_id]);
4781 }
4782
4783 static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
4784 const struct skl_plane_wm *wm,
4785 const struct skl_ddb_allocation *ddb)
4786 {
4787 struct drm_crtc *crtc = &intel_crtc->base;
4788 struct drm_device *dev = crtc->dev;
4789 struct drm_i915_private *dev_priv = to_i915(dev);
4790 int level, max_level = ilk_wm_max_level(dev_priv);
4791 enum pipe pipe = intel_crtc->pipe;
4792
4793 for (level = 0; level <= max_level; level++) {
4794 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
4795 &wm->wm[level]);
4796 }
4797 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
4798
4799 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
4800 &ddb->plane[pipe][PLANE_CURSOR]);
4801 }
4802
4803 bool skl_wm_level_equals(const struct skl_wm_level *l1,
4804 const struct skl_wm_level *l2)
4805 {
4806 if (l1->plane_en != l2->plane_en)
4807 return false;
4808
4809 /* If both planes aren't enabled, the rest shouldn't matter */
4810 if (!l1->plane_en)
4811 return true;
4812
4813 return (l1->plane_res_l == l2->plane_res_l &&
4814 l1->plane_res_b == l2->plane_res_b);
4815 }
4816
4817 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
4818 const struct skl_ddb_entry *b)
4819 {
4820 return a->start < b->end && b->start < a->end;
4821 }
4822
4823 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
4824 const struct skl_ddb_entry *ddb,
4825 int ignore)
4826 {
4827 int i;
4828
4829 for (i = 0; i < I915_MAX_PIPES; i++)
4830 if (i != ignore && entries[i] &&
4831 skl_ddb_entries_overlap(ddb, entries[i]))
4832 return true;
4833
4834 return false;
4835 }
4836
4837 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
4838 const struct skl_pipe_wm *old_pipe_wm,
4839 struct skl_pipe_wm *pipe_wm, /* out */
4840 struct skl_ddb_allocation *ddb, /* out */
4841 bool *changed /* out */)
4842 {
4843 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
4844 int ret;
4845
4846 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
4847 if (ret)
4848 return ret;
4849
4850 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
4851 *changed = false;
4852 else
4853 *changed = true;
4854
4855 return 0;
4856 }
4857
4858 static uint32_t
4859 pipes_modified(struct drm_atomic_state *state)
4860 {
4861 struct drm_crtc *crtc;
4862 struct drm_crtc_state *cstate;
4863 uint32_t i, ret = 0;
4864
4865 for_each_new_crtc_in_state(state, crtc, cstate, i)
4866 ret |= drm_crtc_mask(crtc);
4867
4868 return ret;
4869 }
4870
4871 static int
4872 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
4873 {
4874 struct drm_atomic_state *state = cstate->base.state;
4875 struct drm_device *dev = state->dev;
4876 struct drm_crtc *crtc = cstate->base.crtc;
4877 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4878 struct drm_i915_private *dev_priv = to_i915(dev);
4879 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4880 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4881 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
4882 struct drm_plane_state *plane_state;
4883 struct drm_plane *plane;
4884 enum pipe pipe = intel_crtc->pipe;
4885
4886 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
4887
4888 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
4889 enum plane_id plane_id = to_intel_plane(plane)->id;
4890
4891 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
4892 &new_ddb->plane[pipe][plane_id]) &&
4893 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
4894 &new_ddb->y_plane[pipe][plane_id]))
4895 continue;
4896
4897 plane_state = drm_atomic_get_plane_state(state, plane);
4898 if (IS_ERR(plane_state))
4899 return PTR_ERR(plane_state);
4900 }
4901
4902 return 0;
4903 }
4904
4905 static int
4906 skl_compute_ddb(struct drm_atomic_state *state)
4907 {
4908 struct drm_device *dev = state->dev;
4909 struct drm_i915_private *dev_priv = to_i915(dev);
4910 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4911 struct intel_crtc *intel_crtc;
4912 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
4913 uint32_t realloc_pipes = pipes_modified(state);
4914 int ret;
4915
4916 /*
4917 * If this is our first atomic update following hardware readout,
4918 * we can't trust the DDB that the BIOS programmed for us. Let's
4919 * pretend that all pipes switched active status so that we'll
4920 * ensure a full DDB recompute.
4921 */
4922 if (dev_priv->wm.distrust_bios_wm) {
4923 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4924 state->acquire_ctx);
4925 if (ret)
4926 return ret;
4927
4928 intel_state->active_pipe_changes = ~0;
4929
4930 /*
4931 * We usually only initialize intel_state->active_crtcs if we
4932 * we're doing a modeset; make sure this field is always
4933 * initialized during the sanitization process that happens
4934 * on the first commit too.
4935 */
4936 if (!intel_state->modeset)
4937 intel_state->active_crtcs = dev_priv->active_crtcs;
4938 }
4939
4940 /*
4941 * If the modeset changes which CRTC's are active, we need to
4942 * recompute the DDB allocation for *all* active pipes, even
4943 * those that weren't otherwise being modified in any way by this
4944 * atomic commit. Due to the shrinking of the per-pipe allocations
4945 * when new active CRTC's are added, it's possible for a pipe that
4946 * we were already using and aren't changing at all here to suddenly
4947 * become invalid if its DDB needs exceeds its new allocation.
4948 *
4949 * Note that if we wind up doing a full DDB recompute, we can't let
4950 * any other display updates race with this transaction, so we need
4951 * to grab the lock on *all* CRTC's.
4952 */
4953 if (intel_state->active_pipe_changes) {
4954 realloc_pipes = ~0;
4955 intel_state->wm_results.dirty_pipes = ~0;
4956 }
4957
4958 /*
4959 * We're not recomputing for the pipes not included in the commit, so
4960 * make sure we start with the current state.
4961 */
4962 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4963
4964 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4965 struct intel_crtc_state *cstate;
4966
4967 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
4968 if (IS_ERR(cstate))
4969 return PTR_ERR(cstate);
4970
4971 ret = skl_allocate_pipe_ddb(cstate, ddb);
4972 if (ret)
4973 return ret;
4974
4975 ret = skl_ddb_add_affected_planes(cstate);
4976 if (ret)
4977 return ret;
4978 }
4979
4980 return 0;
4981 }
4982
4983 static void
4984 skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4985 struct skl_wm_values *src,
4986 enum pipe pipe)
4987 {
4988 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4989 sizeof(dst->ddb.y_plane[pipe]));
4990 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4991 sizeof(dst->ddb.plane[pipe]));
4992 }
4993
4994 static void
4995 skl_print_wm_changes(const struct drm_atomic_state *state)
4996 {
4997 const struct drm_device *dev = state->dev;
4998 const struct drm_i915_private *dev_priv = to_i915(dev);
4999 const struct intel_atomic_state *intel_state =
5000 to_intel_atomic_state(state);
5001 const struct drm_crtc *crtc;
5002 const struct drm_crtc_state *cstate;
5003 const struct intel_plane *intel_plane;
5004 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
5005 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
5006 int i;
5007
5008 for_each_new_crtc_in_state(state, crtc, cstate, i) {
5009 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5010 enum pipe pipe = intel_crtc->pipe;
5011
5012 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
5013 enum plane_id plane_id = intel_plane->id;
5014 const struct skl_ddb_entry *old, *new;
5015
5016 old = &old_ddb->plane[pipe][plane_id];
5017 new = &new_ddb->plane[pipe][plane_id];
5018
5019 if (skl_ddb_entry_equal(old, new))
5020 continue;
5021
5022 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
5023 intel_plane->base.base.id,
5024 intel_plane->base.name,
5025 old->start, old->end,
5026 new->start, new->end);
5027 }
5028 }
5029 }
5030
5031 static int
5032 skl_compute_wm(struct drm_atomic_state *state)
5033 {
5034 struct drm_crtc *crtc;
5035 struct drm_crtc_state *cstate;
5036 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5037 struct skl_wm_values *results = &intel_state->wm_results;
5038 struct drm_device *dev = state->dev;
5039 struct skl_pipe_wm *pipe_wm;
5040 bool changed = false;
5041 int ret, i;
5042
5043 /*
5044 * When we distrust bios wm we always need to recompute to set the
5045 * expected DDB allocations for each CRTC.
5046 */
5047 if (to_i915(dev)->wm.distrust_bios_wm)
5048 changed = true;
5049
5050 /*
5051 * If this transaction isn't actually touching any CRTC's, don't
5052 * bother with watermark calculation. Note that if we pass this
5053 * test, we're guaranteed to hold at least one CRTC state mutex,
5054 * which means we can safely use values like dev_priv->active_crtcs
5055 * since any racing commits that want to update them would need to
5056 * hold _all_ CRTC state mutexes.
5057 */
5058 for_each_new_crtc_in_state(state, crtc, cstate, i)
5059 changed = true;
5060
5061 if (!changed)
5062 return 0;
5063
5064 /* Clear all dirty flags */
5065 results->dirty_pipes = 0;
5066
5067 ret = skl_compute_ddb(state);
5068 if (ret)
5069 return ret;
5070
5071 /*
5072 * Calculate WM's for all pipes that are part of this transaction.
5073 * Note that the DDB allocation above may have added more CRTC's that
5074 * weren't otherwise being modified (and set bits in dirty_pipes) if
5075 * pipe allocations had to change.
5076 *
5077 * FIXME: Now that we're doing this in the atomic check phase, we
5078 * should allow skl_update_pipe_wm() to return failure in cases where
5079 * no suitable watermark values can be found.
5080 */
5081 for_each_new_crtc_in_state(state, crtc, cstate, i) {
5082 struct intel_crtc_state *intel_cstate =
5083 to_intel_crtc_state(cstate);
5084 const struct skl_pipe_wm *old_pipe_wm =
5085 &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
5086
5087 pipe_wm = &intel_cstate->wm.skl.optimal;
5088 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
5089 &results->ddb, &changed);
5090 if (ret)
5091 return ret;
5092
5093 if (changed)
5094 results->dirty_pipes |= drm_crtc_mask(crtc);
5095
5096 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
5097 /* This pipe's WM's did not change */
5098 continue;
5099
5100 intel_cstate->update_wm_pre = true;
5101 }
5102
5103 skl_print_wm_changes(state);
5104
5105 return 0;
5106 }
5107
5108 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
5109 struct intel_crtc_state *cstate)
5110 {
5111 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
5112 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5113 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
5114 const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
5115 enum pipe pipe = crtc->pipe;
5116 enum plane_id plane_id;
5117
5118 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
5119 return;
5120
5121 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
5122
5123 for_each_plane_id_on_crtc(crtc, plane_id) {
5124 if (plane_id != PLANE_CURSOR)
5125 skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
5126 ddb, plane_id);
5127 else
5128 skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
5129 ddb);
5130 }
5131 }
5132
5133 static void skl_initial_wm(struct intel_atomic_state *state,
5134 struct intel_crtc_state *cstate)
5135 {
5136 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5137 struct drm_device *dev = intel_crtc->base.dev;
5138 struct drm_i915_private *dev_priv = to_i915(dev);
5139 struct skl_wm_values *results = &state->wm_results;
5140 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
5141 enum pipe pipe = intel_crtc->pipe;
5142
5143 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
5144 return;
5145
5146 mutex_lock(&dev_priv->wm.wm_mutex);
5147
5148 if (cstate->base.active_changed)
5149 skl_atomic_update_crtc_wm(state, cstate);
5150
5151 skl_copy_wm_for_pipe(hw_vals, results, pipe);
5152
5153 mutex_unlock(&dev_priv->wm.wm_mutex);
5154 }
5155
5156 static void ilk_compute_wm_config(struct drm_device *dev,
5157 struct intel_wm_config *config)
5158 {
5159 struct intel_crtc *crtc;
5160
5161 /* Compute the currently _active_ config */
5162 for_each_intel_crtc(dev, crtc) {
5163 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5164
5165 if (!wm->pipe_enabled)
5166 continue;
5167
5168 config->sprites_enabled |= wm->sprites_enabled;
5169 config->sprites_scaled |= wm->sprites_scaled;
5170 config->num_pipes_active++;
5171 }
5172 }
5173
5174 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5175 {
5176 struct drm_device *dev = &dev_priv->drm;
5177 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
5178 struct ilk_wm_maximums max;
5179 struct intel_wm_config config = {};
5180 struct ilk_wm_values results = {};
5181 enum intel_ddb_partitioning partitioning;
5182
5183 ilk_compute_wm_config(dev, &config);
5184
5185 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
5186 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
5187
5188 /* 5/6 split only in single pipe config on IVB+ */
5189 if (INTEL_GEN(dev_priv) >= 7 &&
5190 config.num_pipes_active == 1 && config.sprites_enabled) {
5191 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
5192 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
5193
5194 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
5195 } else {
5196 best_lp_wm = &lp_wm_1_2;
5197 }
5198
5199 partitioning = (best_lp_wm == &lp_wm_1_2) ?
5200 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
5201
5202 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
5203
5204 ilk_write_wm_values(dev_priv, &results);
5205 }
5206
5207 static void ilk_initial_watermarks(struct intel_atomic_state *state,
5208 struct intel_crtc_state *cstate)
5209 {
5210 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5211 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5212
5213 mutex_lock(&dev_priv->wm.wm_mutex);
5214 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
5215 ilk_program_watermarks(dev_priv);
5216 mutex_unlock(&dev_priv->wm.wm_mutex);
5217 }
5218
5219 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
5220 struct intel_crtc_state *cstate)
5221 {
5222 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5223 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5224
5225 mutex_lock(&dev_priv->wm.wm_mutex);
5226 if (cstate->wm.need_postvbl_update) {
5227 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
5228 ilk_program_watermarks(dev_priv);
5229 }
5230 mutex_unlock(&dev_priv->wm.wm_mutex);
5231 }
5232
5233 static inline void skl_wm_level_from_reg_val(uint32_t val,
5234 struct skl_wm_level *level)
5235 {
5236 level->plane_en = val & PLANE_WM_EN;
5237 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
5238 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
5239 PLANE_WM_LINES_MASK;
5240 }
5241
5242 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
5243 struct skl_pipe_wm *out)
5244 {
5245 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5246 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5247 enum pipe pipe = intel_crtc->pipe;
5248 int level, max_level;
5249 enum plane_id plane_id;
5250 uint32_t val;
5251
5252 max_level = ilk_wm_max_level(dev_priv);
5253
5254 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
5255 struct skl_plane_wm *wm = &out->planes[plane_id];
5256
5257 for (level = 0; level <= max_level; level++) {
5258 if (plane_id != PLANE_CURSOR)
5259 val = I915_READ(PLANE_WM(pipe, plane_id, level));
5260 else
5261 val = I915_READ(CUR_WM(pipe, level));
5262
5263 skl_wm_level_from_reg_val(val, &wm->wm[level]);
5264 }
5265
5266 if (plane_id != PLANE_CURSOR)
5267 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
5268 else
5269 val = I915_READ(CUR_WM_TRANS(pipe));
5270
5271 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5272 }
5273
5274 if (!intel_crtc->active)
5275 return;
5276
5277 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
5278 }
5279
5280 void skl_wm_get_hw_state(struct drm_device *dev)
5281 {
5282 struct drm_i915_private *dev_priv = to_i915(dev);
5283 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
5284 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
5285 struct drm_crtc *crtc;
5286 struct intel_crtc *intel_crtc;
5287 struct intel_crtc_state *cstate;
5288
5289 skl_ddb_get_hw_state(dev_priv, ddb);
5290 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5291 intel_crtc = to_intel_crtc(crtc);
5292 cstate = to_intel_crtc_state(crtc->state);
5293
5294 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
5295
5296 if (intel_crtc->active)
5297 hw->dirty_pipes |= drm_crtc_mask(crtc);
5298 }
5299
5300 if (dev_priv->active_crtcs) {
5301 /* Fully recompute DDB on first atomic commit */
5302 dev_priv->wm.distrust_bios_wm = true;
5303 } else {
5304 /* Easy/common case; just sanitize DDB now if everything off */
5305 memset(ddb, 0, sizeof(*ddb));
5306 }
5307 }
5308
5309 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
5310 {
5311 struct drm_device *dev = crtc->dev;
5312 struct drm_i915_private *dev_priv = to_i915(dev);
5313 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5315 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
5316 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
5317 enum pipe pipe = intel_crtc->pipe;
5318 static const i915_reg_t wm0_pipe_reg[] = {
5319 [PIPE_A] = WM0_PIPEA_ILK,
5320 [PIPE_B] = WM0_PIPEB_ILK,
5321 [PIPE_C] = WM0_PIPEC_IVB,
5322 };
5323
5324 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
5325 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5326 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
5327
5328 memset(active, 0, sizeof(*active));
5329
5330 active->pipe_enabled = intel_crtc->active;
5331
5332 if (active->pipe_enabled) {
5333 u32 tmp = hw->wm_pipe[pipe];
5334
5335 /*
5336 * For active pipes LP0 watermark is marked as
5337 * enabled, and LP1+ watermaks as disabled since
5338 * we can't really reverse compute them in case
5339 * multiple pipes are active.
5340 */
5341 active->wm[0].enable = true;
5342 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
5343 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
5344 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
5345 active->linetime = hw->wm_linetime[pipe];
5346 } else {
5347 int level, max_level = ilk_wm_max_level(dev_priv);
5348
5349 /*
5350 * For inactive pipes, all watermark levels
5351 * should be marked as enabled but zeroed,
5352 * which is what we'd compute them to.
5353 */
5354 for (level = 0; level <= max_level; level++)
5355 active->wm[level].enable = true;
5356 }
5357
5358 intel_crtc->wm.active.ilk = *active;
5359 }
5360
5361 #define _FW_WM(value, plane) \
5362 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
5363 #define _FW_WM_VLV(value, plane) \
5364 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
5365
5366 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
5367 struct g4x_wm_values *wm)
5368 {
5369 uint32_t tmp;
5370
5371 tmp = I915_READ(DSPFW1);
5372 wm->sr.plane = _FW_WM(tmp, SR);
5373 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5374 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
5375 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
5376
5377 tmp = I915_READ(DSPFW2);
5378 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
5379 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
5380 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
5381 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
5382 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5383 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
5384
5385 tmp = I915_READ(DSPFW3);
5386 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
5387 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5388 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
5389 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
5390 }
5391
5392 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
5393 struct vlv_wm_values *wm)
5394 {
5395 enum pipe pipe;
5396 uint32_t tmp;
5397
5398 for_each_pipe(dev_priv, pipe) {
5399 tmp = I915_READ(VLV_DDL(pipe));
5400
5401 wm->ddl[pipe].plane[PLANE_PRIMARY] =
5402 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5403 wm->ddl[pipe].plane[PLANE_CURSOR] =
5404 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5405 wm->ddl[pipe].plane[PLANE_SPRITE0] =
5406 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5407 wm->ddl[pipe].plane[PLANE_SPRITE1] =
5408 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5409 }
5410
5411 tmp = I915_READ(DSPFW1);
5412 wm->sr.plane = _FW_WM(tmp, SR);
5413 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5414 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
5415 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
5416
5417 tmp = I915_READ(DSPFW2);
5418 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
5419 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5420 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
5421
5422 tmp = I915_READ(DSPFW3);
5423 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5424
5425 if (IS_CHERRYVIEW(dev_priv)) {
5426 tmp = I915_READ(DSPFW7_CHV);
5427 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5428 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5429
5430 tmp = I915_READ(DSPFW8_CHV);
5431 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
5432 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
5433
5434 tmp = I915_READ(DSPFW9_CHV);
5435 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
5436 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
5437
5438 tmp = I915_READ(DSPHOWM);
5439 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5440 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
5441 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
5442 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
5443 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5444 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5445 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5446 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5447 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5448 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5449 } else {
5450 tmp = I915_READ(DSPFW7);
5451 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5452 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5453
5454 tmp = I915_READ(DSPHOWM);
5455 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5456 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5457 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5458 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5459 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5460 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5461 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5462 }
5463 }
5464
5465 #undef _FW_WM
5466 #undef _FW_WM_VLV
5467
5468 void g4x_wm_get_hw_state(struct drm_device *dev)
5469 {
5470 struct drm_i915_private *dev_priv = to_i915(dev);
5471 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
5472 struct intel_crtc *crtc;
5473
5474 g4x_read_wm_values(dev_priv, wm);
5475
5476 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
5477
5478 for_each_intel_crtc(dev, crtc) {
5479 struct intel_crtc_state *crtc_state =
5480 to_intel_crtc_state(crtc->base.state);
5481 struct g4x_wm_state *active = &crtc->wm.active.g4x;
5482 struct g4x_pipe_wm *raw;
5483 enum pipe pipe = crtc->pipe;
5484 enum plane_id plane_id;
5485 int level, max_level;
5486
5487 active->cxsr = wm->cxsr;
5488 active->hpll_en = wm->hpll_en;
5489 active->fbc_en = wm->fbc_en;
5490
5491 active->sr = wm->sr;
5492 active->hpll = wm->hpll;
5493
5494 for_each_plane_id_on_crtc(crtc, plane_id) {
5495 active->wm.plane[plane_id] =
5496 wm->pipe[pipe].plane[plane_id];
5497 }
5498
5499 if (wm->cxsr && wm->hpll_en)
5500 max_level = G4X_WM_LEVEL_HPLL;
5501 else if (wm->cxsr)
5502 max_level = G4X_WM_LEVEL_SR;
5503 else
5504 max_level = G4X_WM_LEVEL_NORMAL;
5505
5506 level = G4X_WM_LEVEL_NORMAL;
5507 raw = &crtc_state->wm.g4x.raw[level];
5508 for_each_plane_id_on_crtc(crtc, plane_id)
5509 raw->plane[plane_id] = active->wm.plane[plane_id];
5510
5511 if (++level > max_level)
5512 goto out;
5513
5514 raw = &crtc_state->wm.g4x.raw[level];
5515 raw->plane[PLANE_PRIMARY] = active->sr.plane;
5516 raw->plane[PLANE_CURSOR] = active->sr.cursor;
5517 raw->plane[PLANE_SPRITE0] = 0;
5518 raw->fbc = active->sr.fbc;
5519
5520 if (++level > max_level)
5521 goto out;
5522
5523 raw = &crtc_state->wm.g4x.raw[level];
5524 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
5525 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
5526 raw->plane[PLANE_SPRITE0] = 0;
5527 raw->fbc = active->hpll.fbc;
5528
5529 out:
5530 for_each_plane_id_on_crtc(crtc, plane_id)
5531 g4x_raw_plane_wm_set(crtc_state, level,
5532 plane_id, USHRT_MAX);
5533 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
5534
5535 crtc_state->wm.g4x.optimal = *active;
5536 crtc_state->wm.g4x.intermediate = *active;
5537
5538 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
5539 pipe_name(pipe),
5540 wm->pipe[pipe].plane[PLANE_PRIMARY],
5541 wm->pipe[pipe].plane[PLANE_CURSOR],
5542 wm->pipe[pipe].plane[PLANE_SPRITE0]);
5543 }
5544
5545 DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
5546 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
5547 DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
5548 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
5549 DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
5550 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
5551 }
5552
5553 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
5554 {
5555 struct intel_plane *plane;
5556 struct intel_crtc *crtc;
5557
5558 mutex_lock(&dev_priv->wm.wm_mutex);
5559
5560 for_each_intel_plane(&dev_priv->drm, plane) {
5561 struct intel_crtc *crtc =
5562 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
5563 struct intel_crtc_state *crtc_state =
5564 to_intel_crtc_state(crtc->base.state);
5565 struct intel_plane_state *plane_state =
5566 to_intel_plane_state(plane->base.state);
5567 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
5568 enum plane_id plane_id = plane->id;
5569 int level;
5570
5571 if (plane_state->base.visible)
5572 continue;
5573
5574 for (level = 0; level < 3; level++) {
5575 struct g4x_pipe_wm *raw =
5576 &crtc_state->wm.g4x.raw[level];
5577
5578 raw->plane[plane_id] = 0;
5579 wm_state->wm.plane[plane_id] = 0;
5580 }
5581
5582 if (plane_id == PLANE_PRIMARY) {
5583 for (level = 0; level < 3; level++) {
5584 struct g4x_pipe_wm *raw =
5585 &crtc_state->wm.g4x.raw[level];
5586 raw->fbc = 0;
5587 }
5588
5589 wm_state->sr.fbc = 0;
5590 wm_state->hpll.fbc = 0;
5591 wm_state->fbc_en = false;
5592 }
5593 }
5594
5595 for_each_intel_crtc(&dev_priv->drm, crtc) {
5596 struct intel_crtc_state *crtc_state =
5597 to_intel_crtc_state(crtc->base.state);
5598
5599 crtc_state->wm.g4x.intermediate =
5600 crtc_state->wm.g4x.optimal;
5601 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
5602 }
5603
5604 g4x_program_watermarks(dev_priv);
5605
5606 mutex_unlock(&dev_priv->wm.wm_mutex);
5607 }
5608
5609 void vlv_wm_get_hw_state(struct drm_device *dev)
5610 {
5611 struct drm_i915_private *dev_priv = to_i915(dev);
5612 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
5613 struct intel_crtc *crtc;
5614 u32 val;
5615
5616 vlv_read_wm_values(dev_priv, wm);
5617
5618 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
5619 wm->level = VLV_WM_LEVEL_PM2;
5620
5621 if (IS_CHERRYVIEW(dev_priv)) {
5622 mutex_lock(&dev_priv->rps.hw_lock);
5623
5624 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5625 if (val & DSP_MAXFIFO_PM5_ENABLE)
5626 wm->level = VLV_WM_LEVEL_PM5;
5627
5628 /*
5629 * If DDR DVFS is disabled in the BIOS, Punit
5630 * will never ack the request. So if that happens
5631 * assume we don't have to enable/disable DDR DVFS
5632 * dynamically. To test that just set the REQ_ACK
5633 * bit to poke the Punit, but don't change the
5634 * HIGH/LOW bits so that we don't actually change
5635 * the current state.
5636 */
5637 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
5638 val |= FORCE_DDR_FREQ_REQ_ACK;
5639 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
5640
5641 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
5642 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
5643 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
5644 "assuming DDR DVFS is disabled\n");
5645 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
5646 } else {
5647 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
5648 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
5649 wm->level = VLV_WM_LEVEL_DDR_DVFS;
5650 }
5651
5652 mutex_unlock(&dev_priv->rps.hw_lock);
5653 }
5654
5655 for_each_intel_crtc(dev, crtc) {
5656 struct intel_crtc_state *crtc_state =
5657 to_intel_crtc_state(crtc->base.state);
5658 struct vlv_wm_state *active = &crtc->wm.active.vlv;
5659 const struct vlv_fifo_state *fifo_state =
5660 &crtc_state->wm.vlv.fifo_state;
5661 enum pipe pipe = crtc->pipe;
5662 enum plane_id plane_id;
5663 int level;
5664
5665 vlv_get_fifo_size(crtc_state);
5666
5667 active->num_levels = wm->level + 1;
5668 active->cxsr = wm->cxsr;
5669
5670 for (level = 0; level < active->num_levels; level++) {
5671 struct g4x_pipe_wm *raw =
5672 &crtc_state->wm.vlv.raw[level];
5673
5674 active->sr[level].plane = wm->sr.plane;
5675 active->sr[level].cursor = wm->sr.cursor;
5676
5677 for_each_plane_id_on_crtc(crtc, plane_id) {
5678 active->wm[level].plane[plane_id] =
5679 wm->pipe[pipe].plane[plane_id];
5680
5681 raw->plane[plane_id] =
5682 vlv_invert_wm_value(active->wm[level].plane[plane_id],
5683 fifo_state->plane[plane_id]);
5684 }
5685 }
5686
5687 for_each_plane_id_on_crtc(crtc, plane_id)
5688 vlv_raw_plane_wm_set(crtc_state, level,
5689 plane_id, USHRT_MAX);
5690 vlv_invalidate_wms(crtc, active, level);
5691
5692 crtc_state->wm.vlv.optimal = *active;
5693 crtc_state->wm.vlv.intermediate = *active;
5694
5695 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
5696 pipe_name(pipe),
5697 wm->pipe[pipe].plane[PLANE_PRIMARY],
5698 wm->pipe[pipe].plane[PLANE_CURSOR],
5699 wm->pipe[pipe].plane[PLANE_SPRITE0],
5700 wm->pipe[pipe].plane[PLANE_SPRITE1]);
5701 }
5702
5703 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
5704 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
5705 }
5706
5707 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
5708 {
5709 struct intel_plane *plane;
5710 struct intel_crtc *crtc;
5711
5712 mutex_lock(&dev_priv->wm.wm_mutex);
5713
5714 for_each_intel_plane(&dev_priv->drm, plane) {
5715 struct intel_crtc *crtc =
5716 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
5717 struct intel_crtc_state *crtc_state =
5718 to_intel_crtc_state(crtc->base.state);
5719 struct intel_plane_state *plane_state =
5720 to_intel_plane_state(plane->base.state);
5721 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
5722 const struct vlv_fifo_state *fifo_state =
5723 &crtc_state->wm.vlv.fifo_state;
5724 enum plane_id plane_id = plane->id;
5725 int level;
5726
5727 if (plane_state->base.visible)
5728 continue;
5729
5730 for (level = 0; level < wm_state->num_levels; level++) {
5731 struct g4x_pipe_wm *raw =
5732 &crtc_state->wm.vlv.raw[level];
5733
5734 raw->plane[plane_id] = 0;
5735
5736 wm_state->wm[level].plane[plane_id] =
5737 vlv_invert_wm_value(raw->plane[plane_id],
5738 fifo_state->plane[plane_id]);
5739 }
5740 }
5741
5742 for_each_intel_crtc(&dev_priv->drm, crtc) {
5743 struct intel_crtc_state *crtc_state =
5744 to_intel_crtc_state(crtc->base.state);
5745
5746 crtc_state->wm.vlv.intermediate =
5747 crtc_state->wm.vlv.optimal;
5748 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
5749 }
5750
5751 vlv_program_watermarks(dev_priv);
5752
5753 mutex_unlock(&dev_priv->wm.wm_mutex);
5754 }
5755
5756 void ilk_wm_get_hw_state(struct drm_device *dev)
5757 {
5758 struct drm_i915_private *dev_priv = to_i915(dev);
5759 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5760 struct drm_crtc *crtc;
5761
5762 for_each_crtc(dev, crtc)
5763 ilk_pipe_wm_get_hw_state(crtc);
5764
5765 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
5766 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
5767 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
5768
5769 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
5770 if (INTEL_GEN(dev_priv) >= 7) {
5771 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
5772 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
5773 }
5774
5775 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5776 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
5777 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
5778 else if (IS_IVYBRIDGE(dev_priv))
5779 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
5780 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
5781
5782 hw->enable_fbc_wm =
5783 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
5784 }
5785
5786 /**
5787 * intel_update_watermarks - update FIFO watermark values based on current modes
5788 *
5789 * Calculate watermark values for the various WM regs based on current mode
5790 * and plane configuration.
5791 *
5792 * There are several cases to deal with here:
5793 * - normal (i.e. non-self-refresh)
5794 * - self-refresh (SR) mode
5795 * - lines are large relative to FIFO size (buffer can hold up to 2)
5796 * - lines are small relative to FIFO size (buffer can hold more than 2
5797 * lines), so need to account for TLB latency
5798 *
5799 * The normal calculation is:
5800 * watermark = dotclock * bytes per pixel * latency
5801 * where latency is platform & configuration dependent (we assume pessimal
5802 * values here).
5803 *
5804 * The SR calculation is:
5805 * watermark = (trunc(latency/line time)+1) * surface width *
5806 * bytes per pixel
5807 * where
5808 * line time = htotal / dotclock
5809 * surface width = hdisplay for normal plane and 64 for cursor
5810 * and latency is assumed to be high, as above.
5811 *
5812 * The final value programmed to the register should always be rounded up,
5813 * and include an extra 2 entries to account for clock crossings.
5814 *
5815 * We don't use the sprite, so we can ignore that. And on Crestline we have
5816 * to set the non-SR watermarks to 8.
5817 */
5818 void intel_update_watermarks(struct intel_crtc *crtc)
5819 {
5820 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5821
5822 if (dev_priv->display.update_wm)
5823 dev_priv->display.update_wm(crtc);
5824 }
5825
5826 void intel_enable_ipc(struct drm_i915_private *dev_priv)
5827 {
5828 u32 val;
5829
5830 /* Display WA #0477 WaDisableIPC: skl */
5831 if (IS_SKYLAKE(dev_priv)) {
5832 dev_priv->ipc_enabled = false;
5833 return;
5834 }
5835
5836 val = I915_READ(DISP_ARB_CTL2);
5837
5838 if (dev_priv->ipc_enabled)
5839 val |= DISP_IPC_ENABLE;
5840 else
5841 val &= ~DISP_IPC_ENABLE;
5842
5843 I915_WRITE(DISP_ARB_CTL2, val);
5844 }
5845
5846 void intel_init_ipc(struct drm_i915_private *dev_priv)
5847 {
5848 dev_priv->ipc_enabled = false;
5849 if (!HAS_IPC(dev_priv))
5850 return;
5851
5852 dev_priv->ipc_enabled = true;
5853 intel_enable_ipc(dev_priv);
5854 }
5855
5856 /*
5857 * Lock protecting IPS related data structures
5858 */
5859 DEFINE_SPINLOCK(mchdev_lock);
5860
5861 /* Global for IPS driver to get at the current i915 device. Protected by
5862 * mchdev_lock. */
5863 static struct drm_i915_private *i915_mch_dev;
5864
5865 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
5866 {
5867 u16 rgvswctl;
5868
5869 lockdep_assert_held(&mchdev_lock);
5870
5871 rgvswctl = I915_READ16(MEMSWCTL);
5872 if (rgvswctl & MEMCTL_CMD_STS) {
5873 DRM_DEBUG("gpu busy, RCS change rejected\n");
5874 return false; /* still busy with another command */
5875 }
5876
5877 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5878 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5879 I915_WRITE16(MEMSWCTL, rgvswctl);
5880 POSTING_READ16(MEMSWCTL);
5881
5882 rgvswctl |= MEMCTL_CMD_STS;
5883 I915_WRITE16(MEMSWCTL, rgvswctl);
5884
5885 return true;
5886 }
5887
5888 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
5889 {
5890 u32 rgvmodectl;
5891 u8 fmax, fmin, fstart, vstart;
5892
5893 spin_lock_irq(&mchdev_lock);
5894
5895 rgvmodectl = I915_READ(MEMMODECTL);
5896
5897 /* Enable temp reporting */
5898 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
5899 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
5900
5901 /* 100ms RC evaluation intervals */
5902 I915_WRITE(RCUPEI, 100000);
5903 I915_WRITE(RCDNEI, 100000);
5904
5905 /* Set max/min thresholds to 90ms and 80ms respectively */
5906 I915_WRITE(RCBMAXAVG, 90000);
5907 I915_WRITE(RCBMINAVG, 80000);
5908
5909 I915_WRITE(MEMIHYST, 1);
5910
5911 /* Set up min, max, and cur for interrupt handling */
5912 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
5913 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5914 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5915 MEMMODE_FSTART_SHIFT;
5916
5917 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
5918 PXVFREQ_PX_SHIFT;
5919
5920 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
5921 dev_priv->ips.fstart = fstart;
5922
5923 dev_priv->ips.max_delay = fstart;
5924 dev_priv->ips.min_delay = fmin;
5925 dev_priv->ips.cur_delay = fstart;
5926
5927 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
5928 fmax, fmin, fstart);
5929
5930 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5931
5932 /*
5933 * Interrupts will be enabled in ironlake_irq_postinstall
5934 */
5935
5936 I915_WRITE(VIDSTART, vstart);
5937 POSTING_READ(VIDSTART);
5938
5939 rgvmodectl |= MEMMODE_SWMODE_EN;
5940 I915_WRITE(MEMMODECTL, rgvmodectl);
5941
5942 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
5943 DRM_ERROR("stuck trying to change perf mode\n");
5944 mdelay(1);
5945
5946 ironlake_set_drps(dev_priv, fstart);
5947
5948 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
5949 I915_READ(DDREC) + I915_READ(CSIEC);
5950 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
5951 dev_priv->ips.last_count2 = I915_READ(GFXEC);
5952 dev_priv->ips.last_time2 = ktime_get_raw_ns();
5953
5954 spin_unlock_irq(&mchdev_lock);
5955 }
5956
5957 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
5958 {
5959 u16 rgvswctl;
5960
5961 spin_lock_irq(&mchdev_lock);
5962
5963 rgvswctl = I915_READ16(MEMSWCTL);
5964
5965 /* Ack interrupts, disable EFC interrupt */
5966 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
5967 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
5968 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
5969 I915_WRITE(DEIIR, DE_PCU_EVENT);
5970 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
5971
5972 /* Go back to the starting frequency */
5973 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
5974 mdelay(1);
5975 rgvswctl |= MEMCTL_CMD_STS;
5976 I915_WRITE(MEMSWCTL, rgvswctl);
5977 mdelay(1);
5978
5979 spin_unlock_irq(&mchdev_lock);
5980 }
5981
5982 /* There's a funny hw issue where the hw returns all 0 when reading from
5983 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
5984 * ourselves, instead of doing a rmw cycle (which might result in us clearing
5985 * all limits and the gpu stuck at whatever frequency it is at atm).
5986 */
5987 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
5988 {
5989 u32 limits;
5990
5991 /* Only set the down limit when we've reached the lowest level to avoid
5992 * getting more interrupts, otherwise leave this clear. This prevents a
5993 * race in the hw when coming out of rc6: There's a tiny window where
5994 * the hw runs at the minimal clock before selecting the desired
5995 * frequency, if the down threshold expires in that window we will not
5996 * receive a down interrupt. */
5997 if (INTEL_GEN(dev_priv) >= 9) {
5998 limits = (dev_priv->rps.max_freq_softlimit) << 23;
5999 if (val <= dev_priv->rps.min_freq_softlimit)
6000 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
6001 } else {
6002 limits = dev_priv->rps.max_freq_softlimit << 24;
6003 if (val <= dev_priv->rps.min_freq_softlimit)
6004 limits |= dev_priv->rps.min_freq_softlimit << 16;
6005 }
6006
6007 return limits;
6008 }
6009
6010 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
6011 {
6012 int new_power;
6013 u32 threshold_up = 0, threshold_down = 0; /* in % */
6014 u32 ei_up = 0, ei_down = 0;
6015
6016 new_power = dev_priv->rps.power;
6017 switch (dev_priv->rps.power) {
6018 case LOW_POWER:
6019 if (val > dev_priv->rps.efficient_freq + 1 &&
6020 val > dev_priv->rps.cur_freq)
6021 new_power = BETWEEN;
6022 break;
6023
6024 case BETWEEN:
6025 if (val <= dev_priv->rps.efficient_freq &&
6026 val < dev_priv->rps.cur_freq)
6027 new_power = LOW_POWER;
6028 else if (val >= dev_priv->rps.rp0_freq &&
6029 val > dev_priv->rps.cur_freq)
6030 new_power = HIGH_POWER;
6031 break;
6032
6033 case HIGH_POWER:
6034 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
6035 val < dev_priv->rps.cur_freq)
6036 new_power = BETWEEN;
6037 break;
6038 }
6039 /* Max/min bins are special */
6040 if (val <= dev_priv->rps.min_freq_softlimit)
6041 new_power = LOW_POWER;
6042 if (val >= dev_priv->rps.max_freq_softlimit)
6043 new_power = HIGH_POWER;
6044 if (new_power == dev_priv->rps.power)
6045 return;
6046
6047 /* Note the units here are not exactly 1us, but 1280ns. */
6048 switch (new_power) {
6049 case LOW_POWER:
6050 /* Upclock if more than 95% busy over 16ms */
6051 ei_up = 16000;
6052 threshold_up = 95;
6053
6054 /* Downclock if less than 85% busy over 32ms */
6055 ei_down = 32000;
6056 threshold_down = 85;
6057 break;
6058
6059 case BETWEEN:
6060 /* Upclock if more than 90% busy over 13ms */
6061 ei_up = 13000;
6062 threshold_up = 90;
6063
6064 /* Downclock if less than 75% busy over 32ms */
6065 ei_down = 32000;
6066 threshold_down = 75;
6067 break;
6068
6069 case HIGH_POWER:
6070 /* Upclock if more than 85% busy over 10ms */
6071 ei_up = 10000;
6072 threshold_up = 85;
6073
6074 /* Downclock if less than 60% busy over 32ms */
6075 ei_down = 32000;
6076 threshold_down = 60;
6077 break;
6078 }
6079
6080 /* When byt can survive without system hang with dynamic
6081 * sw freq adjustments, this restriction can be lifted.
6082 */
6083 if (IS_VALLEYVIEW(dev_priv))
6084 goto skip_hw_write;
6085
6086 I915_WRITE(GEN6_RP_UP_EI,
6087 GT_INTERVAL_FROM_US(dev_priv, ei_up));
6088 I915_WRITE(GEN6_RP_UP_THRESHOLD,
6089 GT_INTERVAL_FROM_US(dev_priv,
6090 ei_up * threshold_up / 100));
6091
6092 I915_WRITE(GEN6_RP_DOWN_EI,
6093 GT_INTERVAL_FROM_US(dev_priv, ei_down));
6094 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
6095 GT_INTERVAL_FROM_US(dev_priv,
6096 ei_down * threshold_down / 100));
6097
6098 I915_WRITE(GEN6_RP_CONTROL,
6099 GEN6_RP_MEDIA_TURBO |
6100 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6101 GEN6_RP_MEDIA_IS_GFX |
6102 GEN6_RP_ENABLE |
6103 GEN6_RP_UP_BUSY_AVG |
6104 GEN6_RP_DOWN_IDLE_AVG);
6105
6106 skip_hw_write:
6107 dev_priv->rps.power = new_power;
6108 dev_priv->rps.up_threshold = threshold_up;
6109 dev_priv->rps.down_threshold = threshold_down;
6110 dev_priv->rps.last_adj = 0;
6111 }
6112
6113 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
6114 {
6115 u32 mask = 0;
6116
6117 /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
6118 if (val > dev_priv->rps.min_freq_softlimit)
6119 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
6120 if (val < dev_priv->rps.max_freq_softlimit)
6121 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
6122
6123 mask &= dev_priv->pm_rps_events;
6124
6125 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
6126 }
6127
6128 /* gen6_set_rps is called to update the frequency request, but should also be
6129 * called when the range (min_delay and max_delay) is modified so that we can
6130 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
6131 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
6132 {
6133 /* min/max delay may still have been modified so be sure to
6134 * write the limits value.
6135 */
6136 if (val != dev_priv->rps.cur_freq) {
6137 gen6_set_rps_thresholds(dev_priv, val);
6138
6139 if (INTEL_GEN(dev_priv) >= 9)
6140 I915_WRITE(GEN6_RPNSWREQ,
6141 GEN9_FREQUENCY(val));
6142 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6143 I915_WRITE(GEN6_RPNSWREQ,
6144 HSW_FREQUENCY(val));
6145 else
6146 I915_WRITE(GEN6_RPNSWREQ,
6147 GEN6_FREQUENCY(val) |
6148 GEN6_OFFSET(0) |
6149 GEN6_AGGRESSIVE_TURBO);
6150 }
6151
6152 /* Make sure we continue to get interrupts
6153 * until we hit the minimum or maximum frequencies.
6154 */
6155 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
6156 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6157
6158 dev_priv->rps.cur_freq = val;
6159 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6160
6161 return 0;
6162 }
6163
6164 static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
6165 {
6166 int err;
6167
6168 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
6169 "Odd GPU freq value\n"))
6170 val &= ~1;
6171
6172 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6173
6174 if (val != dev_priv->rps.cur_freq) {
6175 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
6176 if (err)
6177 return err;
6178
6179 gen6_set_rps_thresholds(dev_priv, val);
6180 }
6181
6182 dev_priv->rps.cur_freq = val;
6183 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6184
6185 return 0;
6186 }
6187
6188 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
6189 *
6190 * * If Gfx is Idle, then
6191 * 1. Forcewake Media well.
6192 * 2. Request idle freq.
6193 * 3. Release Forcewake of Media well.
6194 */
6195 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
6196 {
6197 u32 val = dev_priv->rps.idle_freq;
6198 int err;
6199
6200 if (dev_priv->rps.cur_freq <= val)
6201 return;
6202
6203 /* The punit delays the write of the frequency and voltage until it
6204 * determines the GPU is awake. During normal usage we don't want to
6205 * waste power changing the frequency if the GPU is sleeping (rc6).
6206 * However, the GPU and driver is now idle and we do not want to delay
6207 * switching to minimum voltage (reducing power whilst idle) as we do
6208 * not expect to be woken in the near future and so must flush the
6209 * change by waking the device.
6210 *
6211 * We choose to take the media powerwell (either would do to trick the
6212 * punit into committing the voltage change) as that takes a lot less
6213 * power than the render powerwell.
6214 */
6215 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
6216 err = valleyview_set_rps(dev_priv, val);
6217 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
6218
6219 if (err)
6220 DRM_ERROR("Failed to set RPS for idle\n");
6221 }
6222
6223 void gen6_rps_busy(struct drm_i915_private *dev_priv)
6224 {
6225 mutex_lock(&dev_priv->rps.hw_lock);
6226 if (dev_priv->rps.enabled) {
6227 u8 freq;
6228
6229 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
6230 gen6_rps_reset_ei(dev_priv);
6231 I915_WRITE(GEN6_PMINTRMSK,
6232 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
6233
6234 gen6_enable_rps_interrupts(dev_priv);
6235
6236 /* Use the user's desired frequency as a guide, but for better
6237 * performance, jump directly to RPe as our starting frequency.
6238 */
6239 freq = max(dev_priv->rps.cur_freq,
6240 dev_priv->rps.efficient_freq);
6241
6242 if (intel_set_rps(dev_priv,
6243 clamp(freq,
6244 dev_priv->rps.min_freq_softlimit,
6245 dev_priv->rps.max_freq_softlimit)))
6246 DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
6247 }
6248 mutex_unlock(&dev_priv->rps.hw_lock);
6249 }
6250
6251 void gen6_rps_idle(struct drm_i915_private *dev_priv)
6252 {
6253 /* Flush our bottom-half so that it does not race with us
6254 * setting the idle frequency and so that it is bounded by
6255 * our rpm wakeref. And then disable the interrupts to stop any
6256 * futher RPS reclocking whilst we are asleep.
6257 */
6258 gen6_disable_rps_interrupts(dev_priv);
6259
6260 mutex_lock(&dev_priv->rps.hw_lock);
6261 if (dev_priv->rps.enabled) {
6262 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6263 vlv_set_rps_idle(dev_priv);
6264 else
6265 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
6266 dev_priv->rps.last_adj = 0;
6267 I915_WRITE(GEN6_PMINTRMSK,
6268 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
6269 }
6270 mutex_unlock(&dev_priv->rps.hw_lock);
6271 }
6272
6273 void gen6_rps_boost(struct drm_i915_gem_request *rq,
6274 struct intel_rps_client *rps)
6275 {
6276 struct drm_i915_private *i915 = rq->i915;
6277 unsigned long flags;
6278 bool boost;
6279
6280 /* This is intentionally racy! We peek at the state here, then
6281 * validate inside the RPS worker.
6282 */
6283 if (!i915->rps.enabled)
6284 return;
6285
6286 boost = false;
6287 spin_lock_irqsave(&rq->lock, flags);
6288 if (!rq->waitboost && !i915_gem_request_completed(rq)) {
6289 atomic_inc(&i915->rps.num_waiters);
6290 rq->waitboost = true;
6291 boost = true;
6292 }
6293 spin_unlock_irqrestore(&rq->lock, flags);
6294 if (!boost)
6295 return;
6296
6297 if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
6298 schedule_work(&i915->rps.work);
6299
6300 atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
6301 }
6302
6303 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
6304 {
6305 int err;
6306
6307 lockdep_assert_held(&dev_priv->rps.hw_lock);
6308 GEM_BUG_ON(val > dev_priv->rps.max_freq);
6309 GEM_BUG_ON(val < dev_priv->rps.min_freq);
6310
6311 if (!dev_priv->rps.enabled) {
6312 dev_priv->rps.cur_freq = val;
6313 return 0;
6314 }
6315
6316 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6317 err = valleyview_set_rps(dev_priv, val);
6318 else
6319 err = gen6_set_rps(dev_priv, val);
6320
6321 return err;
6322 }
6323
6324 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
6325 {
6326 I915_WRITE(GEN6_RC_CONTROL, 0);
6327 I915_WRITE(GEN9_PG_ENABLE, 0);
6328 }
6329
6330 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
6331 {
6332 I915_WRITE(GEN6_RP_CONTROL, 0);
6333 }
6334
6335 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
6336 {
6337 I915_WRITE(GEN6_RC_CONTROL, 0);
6338 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6339 I915_WRITE(GEN6_RP_CONTROL, 0);
6340 }
6341
6342 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
6343 {
6344 I915_WRITE(GEN6_RC_CONTROL, 0);
6345 }
6346
6347 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
6348 {
6349 /* we're doing forcewake before Disabling RC6,
6350 * This what the BIOS expects when going into suspend */
6351 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6352
6353 I915_WRITE(GEN6_RC_CONTROL, 0);
6354
6355 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6356 }
6357
6358 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
6359 {
6360 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6361 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
6362 mode = GEN6_RC_CTL_RC6_ENABLE;
6363 else
6364 mode = 0;
6365 }
6366 if (HAS_RC6p(dev_priv))
6367 DRM_DEBUG_DRIVER("Enabling RC6 states: "
6368 "RC6 %s RC6p %s RC6pp %s\n",
6369 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
6370 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
6371 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
6372
6373 else
6374 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
6375 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
6376 }
6377
6378 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
6379 {
6380 struct i915_ggtt *ggtt = &dev_priv->ggtt;
6381 bool enable_rc6 = true;
6382 unsigned long rc6_ctx_base;
6383 u32 rc_ctl;
6384 int rc_sw_target;
6385
6386 rc_ctl = I915_READ(GEN6_RC_CONTROL);
6387 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
6388 RC_SW_TARGET_STATE_SHIFT;
6389 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
6390 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
6391 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
6392 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
6393 rc_sw_target);
6394
6395 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
6396 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
6397 enable_rc6 = false;
6398 }
6399
6400 /*
6401 * The exact context size is not known for BXT, so assume a page size
6402 * for this check.
6403 */
6404 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
6405 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
6406 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
6407 ggtt->stolen_reserved_size))) {
6408 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
6409 enable_rc6 = false;
6410 }
6411
6412 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
6413 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
6414 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
6415 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
6416 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
6417 enable_rc6 = false;
6418 }
6419
6420 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
6421 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
6422 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
6423 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
6424 enable_rc6 = false;
6425 }
6426
6427 if (!I915_READ(GEN6_GFXPAUSE)) {
6428 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
6429 enable_rc6 = false;
6430 }
6431
6432 if (!I915_READ(GEN8_MISC_CTRL0)) {
6433 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
6434 enable_rc6 = false;
6435 }
6436
6437 return enable_rc6;
6438 }
6439
6440 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
6441 {
6442 /* No RC6 before Ironlake and code is gone for ilk. */
6443 if (INTEL_INFO(dev_priv)->gen < 6)
6444 return 0;
6445
6446 if (!enable_rc6)
6447 return 0;
6448
6449 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
6450 DRM_INFO("RC6 disabled by BIOS\n");
6451 return 0;
6452 }
6453
6454 /* Respect the kernel parameter if it is set */
6455 if (enable_rc6 >= 0) {
6456 int mask;
6457
6458 if (HAS_RC6p(dev_priv))
6459 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
6460 INTEL_RC6pp_ENABLE;
6461 else
6462 mask = INTEL_RC6_ENABLE;
6463
6464 if ((enable_rc6 & mask) != enable_rc6)
6465 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
6466 "(requested %d, valid %d)\n",
6467 enable_rc6 & mask, enable_rc6, mask);
6468
6469 return enable_rc6 & mask;
6470 }
6471
6472 if (IS_IVYBRIDGE(dev_priv))
6473 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
6474
6475 return INTEL_RC6_ENABLE;
6476 }
6477
6478 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
6479 {
6480 /* All of these values are in units of 50MHz */
6481
6482 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
6483 if (IS_GEN9_LP(dev_priv)) {
6484 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
6485 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
6486 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
6487 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
6488 } else {
6489 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6490 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
6491 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
6492 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
6493 }
6494 /* hw_max = RP0 until we check for overclocking */
6495 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
6496
6497 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
6498 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
6499 IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6500 u32 ddcc_status = 0;
6501
6502 if (sandybridge_pcode_read(dev_priv,
6503 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
6504 &ddcc_status) == 0)
6505 dev_priv->rps.efficient_freq =
6506 clamp_t(u8,
6507 ((ddcc_status >> 8) & 0xff),
6508 dev_priv->rps.min_freq,
6509 dev_priv->rps.max_freq);
6510 }
6511
6512 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6513 /* Store the frequency values in 16.66 MHZ units, which is
6514 * the natural hardware unit for SKL
6515 */
6516 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
6517 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
6518 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
6519 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
6520 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
6521 }
6522 }
6523
6524 static void reset_rps(struct drm_i915_private *dev_priv,
6525 int (*set)(struct drm_i915_private *, u8))
6526 {
6527 u8 freq = dev_priv->rps.cur_freq;
6528
6529 /* force a reset */
6530 dev_priv->rps.power = -1;
6531 dev_priv->rps.cur_freq = -1;
6532
6533 if (set(dev_priv, freq))
6534 DRM_ERROR("Failed to reset RPS to initial values\n");
6535 }
6536
6537 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
6538 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
6539 {
6540 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6541
6542 /* Program defaults and thresholds for RPS*/
6543 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6544 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
6545
6546 /* 1 second timeout*/
6547 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
6548 GT_INTERVAL_FROM_US(dev_priv, 1000000));
6549
6550 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
6551
6552 /* Leaning on the below call to gen6_set_rps to program/setup the
6553 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
6554 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
6555 reset_rps(dev_priv, gen6_set_rps);
6556
6557 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6558 }
6559
6560 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
6561 {
6562 struct intel_engine_cs *engine;
6563 enum intel_engine_id id;
6564 uint32_t rc6_mask = 0;
6565
6566 /* 1a: Software RC state - RC0 */
6567 I915_WRITE(GEN6_RC_STATE, 0);
6568
6569 /* 1b: Get forcewake during program sequence. Although the driver
6570 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6571 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6572
6573 /* 2a: Disable RC states. */
6574 I915_WRITE(GEN6_RC_CONTROL, 0);
6575
6576 /* 2b: Program RC6 thresholds.*/
6577
6578 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
6579 if (IS_SKYLAKE(dev_priv))
6580 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
6581 else
6582 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
6583 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6584 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6585 for_each_engine(engine, dev_priv, id)
6586 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6587
6588 if (HAS_GUC(dev_priv))
6589 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
6590
6591 I915_WRITE(GEN6_RC_SLEEP, 0);
6592
6593 /* 2c: Program Coarse Power Gating Policies. */
6594 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
6595 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
6596
6597 /* 3a: Enable RC6 */
6598 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6599 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
6600 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
6601 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
6602 I915_WRITE(GEN6_RC_CONTROL,
6603 GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
6604
6605 /*
6606 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
6607 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
6608 */
6609 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
6610 I915_WRITE(GEN9_PG_ENABLE, 0);
6611 else
6612 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
6613 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
6614
6615 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6616 }
6617
6618 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
6619 {
6620 struct intel_engine_cs *engine;
6621 enum intel_engine_id id;
6622 uint32_t rc6_mask = 0;
6623
6624 /* 1a: Software RC state - RC0 */
6625 I915_WRITE(GEN6_RC_STATE, 0);
6626
6627 /* 1c & 1d: Get forcewake during program sequence. Although the driver
6628 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6629 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6630
6631 /* 2a: Disable RC states. */
6632 I915_WRITE(GEN6_RC_CONTROL, 0);
6633
6634 /* 2b: Program RC6 thresholds.*/
6635 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6636 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6637 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6638 for_each_engine(engine, dev_priv, id)
6639 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6640 I915_WRITE(GEN6_RC_SLEEP, 0);
6641 if (IS_BROADWELL(dev_priv))
6642 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
6643 else
6644 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
6645
6646 /* 3: Enable RC6 */
6647 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6648 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
6649 intel_print_rc6_info(dev_priv, rc6_mask);
6650 if (IS_BROADWELL(dev_priv))
6651 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
6652 GEN7_RC_CTL_TO_MODE |
6653 rc6_mask);
6654 else
6655 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
6656 GEN6_RC_CTL_EI_MODE(1) |
6657 rc6_mask);
6658
6659 /* 4 Program defaults and thresholds for RPS*/
6660 I915_WRITE(GEN6_RPNSWREQ,
6661 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
6662 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6663 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
6664 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
6665 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
6666
6667 /* Docs recommend 900MHz, and 300 MHz respectively */
6668 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6669 dev_priv->rps.max_freq_softlimit << 24 |
6670 dev_priv->rps.min_freq_softlimit << 16);
6671
6672 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
6673 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
6674 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
6675 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
6676
6677 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6678
6679 /* 5: Enable RPS */
6680 I915_WRITE(GEN6_RP_CONTROL,
6681 GEN6_RP_MEDIA_TURBO |
6682 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6683 GEN6_RP_MEDIA_IS_GFX |
6684 GEN6_RP_ENABLE |
6685 GEN6_RP_UP_BUSY_AVG |
6686 GEN6_RP_DOWN_IDLE_AVG);
6687
6688 /* 6: Ring frequency + overclocking (our driver does this later */
6689
6690 reset_rps(dev_priv, gen6_set_rps);
6691
6692 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6693 }
6694
6695 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
6696 {
6697 struct intel_engine_cs *engine;
6698 enum intel_engine_id id;
6699 u32 rc6vids, rc6_mask = 0;
6700 u32 gtfifodbg;
6701 int rc6_mode;
6702 int ret;
6703
6704 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6705
6706 /* Here begins a magic sequence of register writes to enable
6707 * auto-downclocking.
6708 *
6709 * Perhaps there might be some value in exposing these to
6710 * userspace...
6711 */
6712 I915_WRITE(GEN6_RC_STATE, 0);
6713
6714 /* Clear the DBG now so we don't confuse earlier errors */
6715 gtfifodbg = I915_READ(GTFIFODBG);
6716 if (gtfifodbg) {
6717 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
6718 I915_WRITE(GTFIFODBG, gtfifodbg);
6719 }
6720
6721 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6722
6723 /* disable the counters and set deterministic thresholds */
6724 I915_WRITE(GEN6_RC_CONTROL, 0);
6725
6726 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
6727 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
6728 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
6729 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6730 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6731
6732 for_each_engine(engine, dev_priv, id)
6733 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6734
6735 I915_WRITE(GEN6_RC_SLEEP, 0);
6736 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
6737 if (IS_IVYBRIDGE(dev_priv))
6738 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
6739 else
6740 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
6741 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
6742 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6743
6744 /* Check if we are enabling RC6 */
6745 rc6_mode = intel_enable_rc6();
6746 if (rc6_mode & INTEL_RC6_ENABLE)
6747 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
6748
6749 /* We don't use those on Haswell */
6750 if (!IS_HASWELL(dev_priv)) {
6751 if (rc6_mode & INTEL_RC6p_ENABLE)
6752 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
6753
6754 if (rc6_mode & INTEL_RC6pp_ENABLE)
6755 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
6756 }
6757
6758 intel_print_rc6_info(dev_priv, rc6_mask);
6759
6760 I915_WRITE(GEN6_RC_CONTROL,
6761 rc6_mask |
6762 GEN6_RC_CTL_EI_MODE(1) |
6763 GEN6_RC_CTL_HW_ENABLE);
6764
6765 /* Power down if completely idle for over 50ms */
6766 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
6767 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6768
6769 reset_rps(dev_priv, gen6_set_rps);
6770
6771 rc6vids = 0;
6772 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
6773 if (IS_GEN6(dev_priv) && ret) {
6774 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
6775 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
6776 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
6777 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
6778 rc6vids &= 0xffff00;
6779 rc6vids |= GEN6_ENCODE_RC6_VID(450);
6780 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
6781 if (ret)
6782 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
6783 }
6784
6785 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6786 }
6787
6788 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
6789 {
6790 int min_freq = 15;
6791 unsigned int gpu_freq;
6792 unsigned int max_ia_freq, min_ring_freq;
6793 unsigned int max_gpu_freq, min_gpu_freq;
6794 int scaling_factor = 180;
6795 struct cpufreq_policy *policy;
6796
6797 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6798
6799 policy = cpufreq_cpu_get(0);
6800 if (policy) {
6801 max_ia_freq = policy->cpuinfo.max_freq;
6802 cpufreq_cpu_put(policy);
6803 } else {
6804 /*
6805 * Default to measured freq if none found, PCU will ensure we
6806 * don't go over
6807 */
6808 max_ia_freq = tsc_khz;
6809 }
6810
6811 /* Convert from kHz to MHz */
6812 max_ia_freq /= 1000;
6813
6814 min_ring_freq = I915_READ(DCLK) & 0xf;
6815 /* convert DDR frequency from units of 266.6MHz to bandwidth */
6816 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
6817
6818 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6819 /* Convert GT frequency to 50 HZ units */
6820 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
6821 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
6822 } else {
6823 min_gpu_freq = dev_priv->rps.min_freq;
6824 max_gpu_freq = dev_priv->rps.max_freq;
6825 }
6826
6827 /*
6828 * For each potential GPU frequency, load a ring frequency we'd like
6829 * to use for memory access. We do this by specifying the IA frequency
6830 * the PCU should use as a reference to determine the ring frequency.
6831 */
6832 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
6833 int diff = max_gpu_freq - gpu_freq;
6834 unsigned int ia_freq = 0, ring_freq = 0;
6835
6836 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6837 /*
6838 * ring_freq = 2 * GT. ring_freq is in 100MHz units
6839 * No floor required for ring frequency on SKL.
6840 */
6841 ring_freq = gpu_freq;
6842 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
6843 /* max(2 * GT, DDR). NB: GT is 50MHz units */
6844 ring_freq = max(min_ring_freq, gpu_freq);
6845 } else if (IS_HASWELL(dev_priv)) {
6846 ring_freq = mult_frac(gpu_freq, 5, 4);
6847 ring_freq = max(min_ring_freq, ring_freq);
6848 /* leave ia_freq as the default, chosen by cpufreq */
6849 } else {
6850 /* On older processors, there is no separate ring
6851 * clock domain, so in order to boost the bandwidth
6852 * of the ring, we need to upclock the CPU (ia_freq).
6853 *
6854 * For GPU frequencies less than 750MHz,
6855 * just use the lowest ring freq.
6856 */
6857 if (gpu_freq < min_freq)
6858 ia_freq = 800;
6859 else
6860 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
6861 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
6862 }
6863
6864 sandybridge_pcode_write(dev_priv,
6865 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
6866 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
6867 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
6868 gpu_freq);
6869 }
6870 }
6871
6872 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
6873 {
6874 u32 val, rp0;
6875
6876 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6877
6878 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
6879 case 8:
6880 /* (2 * 4) config */
6881 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
6882 break;
6883 case 12:
6884 /* (2 * 6) config */
6885 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
6886 break;
6887 case 16:
6888 /* (2 * 8) config */
6889 default:
6890 /* Setting (2 * 8) Min RP0 for any other combination */
6891 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
6892 break;
6893 }
6894
6895 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
6896
6897 return rp0;
6898 }
6899
6900 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
6901 {
6902 u32 val, rpe;
6903
6904 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
6905 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
6906
6907 return rpe;
6908 }
6909
6910 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
6911 {
6912 u32 val, rp1;
6913
6914 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6915 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
6916
6917 return rp1;
6918 }
6919
6920 static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
6921 {
6922 u32 val, rpn;
6923
6924 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
6925 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
6926 FB_GFX_FREQ_FUSE_MASK);
6927
6928 return rpn;
6929 }
6930
6931 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
6932 {
6933 u32 val, rp1;
6934
6935 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
6936
6937 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
6938
6939 return rp1;
6940 }
6941
6942 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
6943 {
6944 u32 val, rp0;
6945
6946 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
6947
6948 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
6949 /* Clamp to max */
6950 rp0 = min_t(u32, rp0, 0xea);
6951
6952 return rp0;
6953 }
6954
6955 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
6956 {
6957 u32 val, rpe;
6958
6959 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
6960 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
6961 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
6962 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
6963
6964 return rpe;
6965 }
6966
6967 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
6968 {
6969 u32 val;
6970
6971 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
6972 /*
6973 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
6974 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
6975 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
6976 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
6977 * to make sure it matches what Punit accepts.
6978 */
6979 return max_t(u32, val, 0xc0);
6980 }
6981
6982 /* Check that the pctx buffer wasn't move under us. */
6983 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
6984 {
6985 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6986
6987 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
6988 dev_priv->vlv_pctx->stolen->start);
6989 }
6990
6991
6992 /* Check that the pcbr address is not empty. */
6993 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
6994 {
6995 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6996
6997 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
6998 }
6999
7000 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
7001 {
7002 struct i915_ggtt *ggtt = &dev_priv->ggtt;
7003 unsigned long pctx_paddr, paddr;
7004 u32 pcbr;
7005 int pctx_size = 32*1024;
7006
7007 pcbr = I915_READ(VLV_PCBR);
7008 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
7009 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
7010 paddr = (dev_priv->mm.stolen_base +
7011 (ggtt->stolen_size - pctx_size));
7012
7013 pctx_paddr = (paddr & (~4095));
7014 I915_WRITE(VLV_PCBR, pctx_paddr);
7015 }
7016
7017 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
7018 }
7019
7020 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
7021 {
7022 struct drm_i915_gem_object *pctx;
7023 unsigned long pctx_paddr;
7024 u32 pcbr;
7025 int pctx_size = 24*1024;
7026
7027 pcbr = I915_READ(VLV_PCBR);
7028 if (pcbr) {
7029 /* BIOS set it up already, grab the pre-alloc'd space */
7030 int pcbr_offset;
7031
7032 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
7033 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
7034 pcbr_offset,
7035 I915_GTT_OFFSET_NONE,
7036 pctx_size);
7037 goto out;
7038 }
7039
7040 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
7041
7042 /*
7043 * From the Gunit register HAS:
7044 * The Gfx driver is expected to program this register and ensure
7045 * proper allocation within Gfx stolen memory. For example, this
7046 * register should be programmed such than the PCBR range does not
7047 * overlap with other ranges, such as the frame buffer, protected
7048 * memory, or any other relevant ranges.
7049 */
7050 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
7051 if (!pctx) {
7052 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
7053 goto out;
7054 }
7055
7056 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
7057 I915_WRITE(VLV_PCBR, pctx_paddr);
7058
7059 out:
7060 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
7061 dev_priv->vlv_pctx = pctx;
7062 }
7063
7064 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
7065 {
7066 if (WARN_ON(!dev_priv->vlv_pctx))
7067 return;
7068
7069 i915_gem_object_put(dev_priv->vlv_pctx);
7070 dev_priv->vlv_pctx = NULL;
7071 }
7072
7073 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
7074 {
7075 dev_priv->rps.gpll_ref_freq =
7076 vlv_get_cck_clock(dev_priv, "GPLL ref",
7077 CCK_GPLL_CLOCK_CONTROL,
7078 dev_priv->czclk_freq);
7079
7080 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
7081 dev_priv->rps.gpll_ref_freq);
7082 }
7083
7084 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
7085 {
7086 u32 val;
7087
7088 valleyview_setup_pctx(dev_priv);
7089
7090 vlv_init_gpll_ref_freq(dev_priv);
7091
7092 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7093 switch ((val >> 6) & 3) {
7094 case 0:
7095 case 1:
7096 dev_priv->mem_freq = 800;
7097 break;
7098 case 2:
7099 dev_priv->mem_freq = 1066;
7100 break;
7101 case 3:
7102 dev_priv->mem_freq = 1333;
7103 break;
7104 }
7105 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7106
7107 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
7108 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
7109 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7110 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
7111 dev_priv->rps.max_freq);
7112
7113 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
7114 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7115 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
7116 dev_priv->rps.efficient_freq);
7117
7118 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
7119 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7120 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7121 dev_priv->rps.rp1_freq);
7122
7123 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
7124 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7125 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
7126 dev_priv->rps.min_freq);
7127 }
7128
7129 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
7130 {
7131 u32 val;
7132
7133 cherryview_setup_pctx(dev_priv);
7134
7135 vlv_init_gpll_ref_freq(dev_priv);
7136
7137 mutex_lock(&dev_priv->sb_lock);
7138 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
7139 mutex_unlock(&dev_priv->sb_lock);
7140
7141 switch ((val >> 2) & 0x7) {
7142 case 3:
7143 dev_priv->mem_freq = 2000;
7144 break;
7145 default:
7146 dev_priv->mem_freq = 1600;
7147 break;
7148 }
7149 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7150
7151 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
7152 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
7153 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7154 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
7155 dev_priv->rps.max_freq);
7156
7157 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
7158 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7159 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
7160 dev_priv->rps.efficient_freq);
7161
7162 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
7163 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7164 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7165 dev_priv->rps.rp1_freq);
7166
7167 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
7168 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7169 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
7170 dev_priv->rps.min_freq);
7171
7172 WARN_ONCE((dev_priv->rps.max_freq |
7173 dev_priv->rps.efficient_freq |
7174 dev_priv->rps.rp1_freq |
7175 dev_priv->rps.min_freq) & 1,
7176 "Odd GPU freq values\n");
7177 }
7178
7179 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7180 {
7181 valleyview_cleanup_pctx(dev_priv);
7182 }
7183
7184 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
7185 {
7186 struct intel_engine_cs *engine;
7187 enum intel_engine_id id;
7188 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
7189
7190 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7191
7192 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
7193 GT_FIFO_FREE_ENTRIES_CHV);
7194 if (gtfifodbg) {
7195 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7196 gtfifodbg);
7197 I915_WRITE(GTFIFODBG, gtfifodbg);
7198 }
7199
7200 cherryview_check_pctx(dev_priv);
7201
7202 /* 1a & 1b: Get forcewake during program sequence. Although the driver
7203 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
7204 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7205
7206 /* Disable RC states. */
7207 I915_WRITE(GEN6_RC_CONTROL, 0);
7208
7209 /* 2a: Program RC6 thresholds.*/
7210 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
7211 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
7212 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
7213
7214 for_each_engine(engine, dev_priv, id)
7215 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7216 I915_WRITE(GEN6_RC_SLEEP, 0);
7217
7218 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
7219 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
7220
7221 /* allows RC6 residency counter to work */
7222 I915_WRITE(VLV_COUNTER_CONTROL,
7223 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
7224 VLV_MEDIA_RC6_COUNT_EN |
7225 VLV_RENDER_RC6_COUNT_EN));
7226
7227 /* For now we assume BIOS is allocating and populating the PCBR */
7228 pcbr = I915_READ(VLV_PCBR);
7229
7230 /* 3: Enable RC6 */
7231 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
7232 (pcbr >> VLV_PCBR_ADDR_SHIFT))
7233 rc6_mode = GEN7_RC_CTL_TO_MODE;
7234
7235 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
7236
7237 /* 4 Program defaults and thresholds for RPS*/
7238 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
7239 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
7240 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
7241 I915_WRITE(GEN6_RP_UP_EI, 66000);
7242 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
7243
7244 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7245
7246 /* 5: Enable RPS */
7247 I915_WRITE(GEN6_RP_CONTROL,
7248 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7249 GEN6_RP_MEDIA_IS_GFX |
7250 GEN6_RP_ENABLE |
7251 GEN6_RP_UP_BUSY_AVG |
7252 GEN6_RP_DOWN_IDLE_AVG);
7253
7254 /* Setting Fixed Bias */
7255 val = VLV_OVERRIDE_EN |
7256 VLV_SOC_TDP_EN |
7257 CHV_BIAS_CPU_50_SOC_50;
7258 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
7259
7260 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7261
7262 /* RPS code assumes GPLL is used */
7263 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7264
7265 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7266 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7267
7268 reset_rps(dev_priv, valleyview_set_rps);
7269
7270 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
7271 }
7272
7273 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
7274 {
7275 struct intel_engine_cs *engine;
7276 enum intel_engine_id id;
7277 u32 gtfifodbg, val, rc6_mode = 0;
7278
7279 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7280
7281 valleyview_check_pctx(dev_priv);
7282
7283 gtfifodbg = I915_READ(GTFIFODBG);
7284 if (gtfifodbg) {
7285 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7286 gtfifodbg);
7287 I915_WRITE(GTFIFODBG, gtfifodbg);
7288 }
7289
7290 /* If VLV, Forcewake all wells, else re-direct to regular path */
7291 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7292
7293 /* Disable RC states. */
7294 I915_WRITE(GEN6_RC_CONTROL, 0);
7295
7296 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
7297 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
7298 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
7299 I915_WRITE(GEN6_RP_UP_EI, 66000);
7300 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
7301
7302 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7303
7304 I915_WRITE(GEN6_RP_CONTROL,
7305 GEN6_RP_MEDIA_TURBO |
7306 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7307 GEN6_RP_MEDIA_IS_GFX |
7308 GEN6_RP_ENABLE |
7309 GEN6_RP_UP_BUSY_AVG |
7310 GEN6_RP_DOWN_IDLE_CONT);
7311
7312 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
7313 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
7314 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
7315
7316 for_each_engine(engine, dev_priv, id)
7317 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7318
7319 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
7320
7321 /* allows RC6 residency counter to work */
7322 I915_WRITE(VLV_COUNTER_CONTROL,
7323 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
7324 VLV_MEDIA_RC0_COUNT_EN |
7325 VLV_RENDER_RC0_COUNT_EN |
7326 VLV_MEDIA_RC6_COUNT_EN |
7327 VLV_RENDER_RC6_COUNT_EN));
7328
7329 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
7330 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
7331
7332 intel_print_rc6_info(dev_priv, rc6_mode);
7333
7334 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
7335
7336 /* Setting Fixed Bias */
7337 val = VLV_OVERRIDE_EN |
7338 VLV_SOC_TDP_EN |
7339 VLV_BIAS_CPU_125_SOC_875;
7340 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
7341
7342 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7343
7344 /* RPS code assumes GPLL is used */
7345 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7346
7347 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7348 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7349
7350 reset_rps(dev_priv, valleyview_set_rps);
7351
7352 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
7353 }
7354
7355 static unsigned long intel_pxfreq(u32 vidfreq)
7356 {
7357 unsigned long freq;
7358 int div = (vidfreq & 0x3f0000) >> 16;
7359 int post = (vidfreq & 0x3000) >> 12;
7360 int pre = (vidfreq & 0x7);
7361
7362 if (!pre)
7363 return 0;
7364
7365 freq = ((div * 133333) / ((1<<post) * pre));
7366
7367 return freq;
7368 }
7369
7370 static const struct cparams {
7371 u16 i;
7372 u16 t;
7373 u16 m;
7374 u16 c;
7375 } cparams[] = {
7376 { 1, 1333, 301, 28664 },
7377 { 1, 1066, 294, 24460 },
7378 { 1, 800, 294, 25192 },
7379 { 0, 1333, 276, 27605 },
7380 { 0, 1066, 276, 27605 },
7381 { 0, 800, 231, 23784 },
7382 };
7383
7384 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
7385 {
7386 u64 total_count, diff, ret;
7387 u32 count1, count2, count3, m = 0, c = 0;
7388 unsigned long now = jiffies_to_msecs(jiffies), diff1;
7389 int i;
7390
7391 lockdep_assert_held(&mchdev_lock);
7392
7393 diff1 = now - dev_priv->ips.last_time1;
7394
7395 /* Prevent division-by-zero if we are asking too fast.
7396 * Also, we don't get interesting results if we are polling
7397 * faster than once in 10ms, so just return the saved value
7398 * in such cases.
7399 */
7400 if (diff1 <= 10)
7401 return dev_priv->ips.chipset_power;
7402
7403 count1 = I915_READ(DMIEC);
7404 count2 = I915_READ(DDREC);
7405 count3 = I915_READ(CSIEC);
7406
7407 total_count = count1 + count2 + count3;
7408
7409 /* FIXME: handle per-counter overflow */
7410 if (total_count < dev_priv->ips.last_count1) {
7411 diff = ~0UL - dev_priv->ips.last_count1;
7412 diff += total_count;
7413 } else {
7414 diff = total_count - dev_priv->ips.last_count1;
7415 }
7416
7417 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
7418 if (cparams[i].i == dev_priv->ips.c_m &&
7419 cparams[i].t == dev_priv->ips.r_t) {
7420 m = cparams[i].m;
7421 c = cparams[i].c;
7422 break;
7423 }
7424 }
7425
7426 diff = div_u64(diff, diff1);
7427 ret = ((m * diff) + c);
7428 ret = div_u64(ret, 10);
7429
7430 dev_priv->ips.last_count1 = total_count;
7431 dev_priv->ips.last_time1 = now;
7432
7433 dev_priv->ips.chipset_power = ret;
7434
7435 return ret;
7436 }
7437
7438 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
7439 {
7440 unsigned long val;
7441
7442 if (INTEL_INFO(dev_priv)->gen != 5)
7443 return 0;
7444
7445 spin_lock_irq(&mchdev_lock);
7446
7447 val = __i915_chipset_val(dev_priv);
7448
7449 spin_unlock_irq(&mchdev_lock);
7450
7451 return val;
7452 }
7453
7454 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
7455 {
7456 unsigned long m, x, b;
7457 u32 tsfs;
7458
7459 tsfs = I915_READ(TSFS);
7460
7461 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
7462 x = I915_READ8(TR1);
7463
7464 b = tsfs & TSFS_INTR_MASK;
7465
7466 return ((m * x) / 127) - b;
7467 }
7468
7469 static int _pxvid_to_vd(u8 pxvid)
7470 {
7471 if (pxvid == 0)
7472 return 0;
7473
7474 if (pxvid >= 8 && pxvid < 31)
7475 pxvid = 31;
7476
7477 return (pxvid + 2) * 125;
7478 }
7479
7480 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
7481 {
7482 const int vd = _pxvid_to_vd(pxvid);
7483 const int vm = vd - 1125;
7484
7485 if (INTEL_INFO(dev_priv)->is_mobile)
7486 return vm > 0 ? vm : 0;
7487
7488 return vd;
7489 }
7490
7491 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
7492 {
7493 u64 now, diff, diffms;
7494 u32 count;
7495
7496 lockdep_assert_held(&mchdev_lock);
7497
7498 now = ktime_get_raw_ns();
7499 diffms = now - dev_priv->ips.last_time2;
7500 do_div(diffms, NSEC_PER_MSEC);
7501
7502 /* Don't divide by 0 */
7503 if (!diffms)
7504 return;
7505
7506 count = I915_READ(GFXEC);
7507
7508 if (count < dev_priv->ips.last_count2) {
7509 diff = ~0UL - dev_priv->ips.last_count2;
7510 diff += count;
7511 } else {
7512 diff = count - dev_priv->ips.last_count2;
7513 }
7514
7515 dev_priv->ips.last_count2 = count;
7516 dev_priv->ips.last_time2 = now;
7517
7518 /* More magic constants... */
7519 diff = diff * 1181;
7520 diff = div_u64(diff, diffms * 10);
7521 dev_priv->ips.gfx_power = diff;
7522 }
7523
7524 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
7525 {
7526 if (INTEL_INFO(dev_priv)->gen != 5)
7527 return;
7528
7529 spin_lock_irq(&mchdev_lock);
7530
7531 __i915_update_gfx_val(dev_priv);
7532
7533 spin_unlock_irq(&mchdev_lock);
7534 }
7535
7536 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
7537 {
7538 unsigned long t, corr, state1, corr2, state2;
7539 u32 pxvid, ext_v;
7540
7541 lockdep_assert_held(&mchdev_lock);
7542
7543 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
7544 pxvid = (pxvid >> 24) & 0x7f;
7545 ext_v = pvid_to_extvid(dev_priv, pxvid);
7546
7547 state1 = ext_v;
7548
7549 t = i915_mch_val(dev_priv);
7550
7551 /* Revel in the empirically derived constants */
7552
7553 /* Correction factor in 1/100000 units */
7554 if (t > 80)
7555 corr = ((t * 2349) + 135940);
7556 else if (t >= 50)
7557 corr = ((t * 964) + 29317);
7558 else /* < 50 */
7559 corr = ((t * 301) + 1004);
7560
7561 corr = corr * ((150142 * state1) / 10000 - 78642);
7562 corr /= 100000;
7563 corr2 = (corr * dev_priv->ips.corr);
7564
7565 state2 = (corr2 * state1) / 10000;
7566 state2 /= 100; /* convert to mW */
7567
7568 __i915_update_gfx_val(dev_priv);
7569
7570 return dev_priv->ips.gfx_power + state2;
7571 }
7572
7573 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
7574 {
7575 unsigned long val;
7576
7577 if (INTEL_INFO(dev_priv)->gen != 5)
7578 return 0;
7579
7580 spin_lock_irq(&mchdev_lock);
7581
7582 val = __i915_gfx_val(dev_priv);
7583
7584 spin_unlock_irq(&mchdev_lock);
7585
7586 return val;
7587 }
7588
7589 /**
7590 * i915_read_mch_val - return value for IPS use
7591 *
7592 * Calculate and return a value for the IPS driver to use when deciding whether
7593 * we have thermal and power headroom to increase CPU or GPU power budget.
7594 */
7595 unsigned long i915_read_mch_val(void)
7596 {
7597 struct drm_i915_private *dev_priv;
7598 unsigned long chipset_val, graphics_val, ret = 0;
7599
7600 spin_lock_irq(&mchdev_lock);
7601 if (!i915_mch_dev)
7602 goto out_unlock;
7603 dev_priv = i915_mch_dev;
7604
7605 chipset_val = __i915_chipset_val(dev_priv);
7606 graphics_val = __i915_gfx_val(dev_priv);
7607
7608 ret = chipset_val + graphics_val;
7609
7610 out_unlock:
7611 spin_unlock_irq(&mchdev_lock);
7612
7613 return ret;
7614 }
7615 EXPORT_SYMBOL_GPL(i915_read_mch_val);
7616
7617 /**
7618 * i915_gpu_raise - raise GPU frequency limit
7619 *
7620 * Raise the limit; IPS indicates we have thermal headroom.
7621 */
7622 bool i915_gpu_raise(void)
7623 {
7624 struct drm_i915_private *dev_priv;
7625 bool ret = true;
7626
7627 spin_lock_irq(&mchdev_lock);
7628 if (!i915_mch_dev) {
7629 ret = false;
7630 goto out_unlock;
7631 }
7632 dev_priv = i915_mch_dev;
7633
7634 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
7635 dev_priv->ips.max_delay--;
7636
7637 out_unlock:
7638 spin_unlock_irq(&mchdev_lock);
7639
7640 return ret;
7641 }
7642 EXPORT_SYMBOL_GPL(i915_gpu_raise);
7643
7644 /**
7645 * i915_gpu_lower - lower GPU frequency limit
7646 *
7647 * IPS indicates we're close to a thermal limit, so throttle back the GPU
7648 * frequency maximum.
7649 */
7650 bool i915_gpu_lower(void)
7651 {
7652 struct drm_i915_private *dev_priv;
7653 bool ret = true;
7654
7655 spin_lock_irq(&mchdev_lock);
7656 if (!i915_mch_dev) {
7657 ret = false;
7658 goto out_unlock;
7659 }
7660 dev_priv = i915_mch_dev;
7661
7662 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
7663 dev_priv->ips.max_delay++;
7664
7665 out_unlock:
7666 spin_unlock_irq(&mchdev_lock);
7667
7668 return ret;
7669 }
7670 EXPORT_SYMBOL_GPL(i915_gpu_lower);
7671
7672 /**
7673 * i915_gpu_busy - indicate GPU business to IPS
7674 *
7675 * Tell the IPS driver whether or not the GPU is busy.
7676 */
7677 bool i915_gpu_busy(void)
7678 {
7679 bool ret = false;
7680
7681 spin_lock_irq(&mchdev_lock);
7682 if (i915_mch_dev)
7683 ret = i915_mch_dev->gt.awake;
7684 spin_unlock_irq(&mchdev_lock);
7685
7686 return ret;
7687 }
7688 EXPORT_SYMBOL_GPL(i915_gpu_busy);
7689
7690 /**
7691 * i915_gpu_turbo_disable - disable graphics turbo
7692 *
7693 * Disable graphics turbo by resetting the max frequency and setting the
7694 * current frequency to the default.
7695 */
7696 bool i915_gpu_turbo_disable(void)
7697 {
7698 struct drm_i915_private *dev_priv;
7699 bool ret = true;
7700
7701 spin_lock_irq(&mchdev_lock);
7702 if (!i915_mch_dev) {
7703 ret = false;
7704 goto out_unlock;
7705 }
7706 dev_priv = i915_mch_dev;
7707
7708 dev_priv->ips.max_delay = dev_priv->ips.fstart;
7709
7710 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
7711 ret = false;
7712
7713 out_unlock:
7714 spin_unlock_irq(&mchdev_lock);
7715
7716 return ret;
7717 }
7718 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
7719
7720 /**
7721 * Tells the intel_ips driver that the i915 driver is now loaded, if
7722 * IPS got loaded first.
7723 *
7724 * This awkward dance is so that neither module has to depend on the
7725 * other in order for IPS to do the appropriate communication of
7726 * GPU turbo limits to i915.
7727 */
7728 static void
7729 ips_ping_for_i915_load(void)
7730 {
7731 void (*link)(void);
7732
7733 link = symbol_get(ips_link_to_i915_driver);
7734 if (link) {
7735 link();
7736 symbol_put(ips_link_to_i915_driver);
7737 }
7738 }
7739
7740 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
7741 {
7742 /* We only register the i915 ips part with intel-ips once everything is
7743 * set up, to avoid intel-ips sneaking in and reading bogus values. */
7744 spin_lock_irq(&mchdev_lock);
7745 i915_mch_dev = dev_priv;
7746 spin_unlock_irq(&mchdev_lock);
7747
7748 ips_ping_for_i915_load();
7749 }
7750
7751 void intel_gpu_ips_teardown(void)
7752 {
7753 spin_lock_irq(&mchdev_lock);
7754 i915_mch_dev = NULL;
7755 spin_unlock_irq(&mchdev_lock);
7756 }
7757
7758 static void intel_init_emon(struct drm_i915_private *dev_priv)
7759 {
7760 u32 lcfuse;
7761 u8 pxw[16];
7762 int i;
7763
7764 /* Disable to program */
7765 I915_WRITE(ECR, 0);
7766 POSTING_READ(ECR);
7767
7768 /* Program energy weights for various events */
7769 I915_WRITE(SDEW, 0x15040d00);
7770 I915_WRITE(CSIEW0, 0x007f0000);
7771 I915_WRITE(CSIEW1, 0x1e220004);
7772 I915_WRITE(CSIEW2, 0x04000004);
7773
7774 for (i = 0; i < 5; i++)
7775 I915_WRITE(PEW(i), 0);
7776 for (i = 0; i < 3; i++)
7777 I915_WRITE(DEW(i), 0);
7778
7779 /* Program P-state weights to account for frequency power adjustment */
7780 for (i = 0; i < 16; i++) {
7781 u32 pxvidfreq = I915_READ(PXVFREQ(i));
7782 unsigned long freq = intel_pxfreq(pxvidfreq);
7783 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
7784 PXVFREQ_PX_SHIFT;
7785 unsigned long val;
7786
7787 val = vid * vid;
7788 val *= (freq / 1000);
7789 val *= 255;
7790 val /= (127*127*900);
7791 if (val > 0xff)
7792 DRM_ERROR("bad pxval: %ld\n", val);
7793 pxw[i] = val;
7794 }
7795 /* Render standby states get 0 weight */
7796 pxw[14] = 0;
7797 pxw[15] = 0;
7798
7799 for (i = 0; i < 4; i++) {
7800 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
7801 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
7802 I915_WRITE(PXW(i), val);
7803 }
7804
7805 /* Adjust magic regs to magic values (more experimental results) */
7806 I915_WRITE(OGW0, 0);
7807 I915_WRITE(OGW1, 0);
7808 I915_WRITE(EG0, 0x00007f00);
7809 I915_WRITE(EG1, 0x0000000e);
7810 I915_WRITE(EG2, 0x000e0000);
7811 I915_WRITE(EG3, 0x68000300);
7812 I915_WRITE(EG4, 0x42000000);
7813 I915_WRITE(EG5, 0x00140031);
7814 I915_WRITE(EG6, 0);
7815 I915_WRITE(EG7, 0);
7816
7817 for (i = 0; i < 8; i++)
7818 I915_WRITE(PXWL(i), 0);
7819
7820 /* Enable PMON + select events */
7821 I915_WRITE(ECR, 0x80000019);
7822
7823 lcfuse = I915_READ(LCFUSE02);
7824
7825 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
7826 }
7827
7828 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7829 {
7830 /*
7831 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
7832 * requirement.
7833 */
7834 if (!i915_modparams.enable_rc6) {
7835 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7836 intel_runtime_pm_get(dev_priv);
7837 }
7838
7839 mutex_lock(&dev_priv->drm.struct_mutex);
7840 mutex_lock(&dev_priv->rps.hw_lock);
7841
7842 /* Initialize RPS limits (for userspace) */
7843 if (IS_CHERRYVIEW(dev_priv))
7844 cherryview_init_gt_powersave(dev_priv);
7845 else if (IS_VALLEYVIEW(dev_priv))
7846 valleyview_init_gt_powersave(dev_priv);
7847 else if (INTEL_GEN(dev_priv) >= 6)
7848 gen6_init_rps_frequencies(dev_priv);
7849
7850 /* Derive initial user preferences/limits from the hardware limits */
7851 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
7852 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
7853
7854 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
7855 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
7856
7857 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
7858 dev_priv->rps.min_freq_softlimit =
7859 max_t(int,
7860 dev_priv->rps.efficient_freq,
7861 intel_freq_opcode(dev_priv, 450));
7862
7863 /* After setting max-softlimit, find the overclock max freq */
7864 if (IS_GEN6(dev_priv) ||
7865 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
7866 u32 params = 0;
7867
7868 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
7869 if (params & BIT(31)) { /* OC supported */
7870 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
7871 (dev_priv->rps.max_freq & 0xff) * 50,
7872 (params & 0xff) * 50);
7873 dev_priv->rps.max_freq = params & 0xff;
7874 }
7875 }
7876
7877 /* Finally allow us to boost to max by default */
7878 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
7879
7880 mutex_unlock(&dev_priv->rps.hw_lock);
7881 mutex_unlock(&dev_priv->drm.struct_mutex);
7882
7883 intel_autoenable_gt_powersave(dev_priv);
7884 }
7885
7886 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7887 {
7888 if (IS_VALLEYVIEW(dev_priv))
7889 valleyview_cleanup_gt_powersave(dev_priv);
7890
7891 if (!i915_modparams.enable_rc6)
7892 intel_runtime_pm_put(dev_priv);
7893 }
7894
7895 /**
7896 * intel_suspend_gt_powersave - suspend PM work and helper threads
7897 * @dev_priv: i915 device
7898 *
7899 * We don't want to disable RC6 or other features here, we just want
7900 * to make sure any work we've queued has finished and won't bother
7901 * us while we're suspended.
7902 */
7903 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
7904 {
7905 if (INTEL_GEN(dev_priv) < 6)
7906 return;
7907
7908 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
7909 intel_runtime_pm_put(dev_priv);
7910
7911 /* gen6_rps_idle() will be called later to disable interrupts */
7912 }
7913
7914 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
7915 {
7916 dev_priv->rps.enabled = true; /* force disabling */
7917 intel_disable_gt_powersave(dev_priv);
7918
7919 gen6_reset_rps_interrupts(dev_priv);
7920 }
7921
7922 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
7923 {
7924 if (!READ_ONCE(dev_priv->rps.enabled))
7925 return;
7926
7927 mutex_lock(&dev_priv->rps.hw_lock);
7928
7929 if (INTEL_GEN(dev_priv) >= 9) {
7930 gen9_disable_rc6(dev_priv);
7931 gen9_disable_rps(dev_priv);
7932 } else if (IS_CHERRYVIEW(dev_priv)) {
7933 cherryview_disable_rps(dev_priv);
7934 } else if (IS_VALLEYVIEW(dev_priv)) {
7935 valleyview_disable_rps(dev_priv);
7936 } else if (INTEL_GEN(dev_priv) >= 6) {
7937 gen6_disable_rps(dev_priv);
7938 } else if (IS_IRONLAKE_M(dev_priv)) {
7939 ironlake_disable_drps(dev_priv);
7940 }
7941
7942 dev_priv->rps.enabled = false;
7943 mutex_unlock(&dev_priv->rps.hw_lock);
7944 }
7945
7946 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
7947 {
7948 /* We shouldn't be disabling as we submit, so this should be less
7949 * racy than it appears!
7950 */
7951 if (READ_ONCE(dev_priv->rps.enabled))
7952 return;
7953
7954 /* Powersaving is controlled by the host when inside a VM */
7955 if (intel_vgpu_active(dev_priv))
7956 return;
7957
7958 mutex_lock(&dev_priv->rps.hw_lock);
7959
7960 if (IS_CHERRYVIEW(dev_priv)) {
7961 cherryview_enable_rps(dev_priv);
7962 } else if (IS_VALLEYVIEW(dev_priv)) {
7963 valleyview_enable_rps(dev_priv);
7964 } else if (INTEL_GEN(dev_priv) >= 9) {
7965 gen9_enable_rc6(dev_priv);
7966 gen9_enable_rps(dev_priv);
7967 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
7968 gen6_update_ring_freq(dev_priv);
7969 } else if (IS_BROADWELL(dev_priv)) {
7970 gen8_enable_rps(dev_priv);
7971 gen6_update_ring_freq(dev_priv);
7972 } else if (INTEL_GEN(dev_priv) >= 6) {
7973 gen6_enable_rps(dev_priv);
7974 gen6_update_ring_freq(dev_priv);
7975 } else if (IS_IRONLAKE_M(dev_priv)) {
7976 ironlake_enable_drps(dev_priv);
7977 intel_init_emon(dev_priv);
7978 }
7979
7980 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
7981 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
7982
7983 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
7984 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
7985
7986 dev_priv->rps.enabled = true;
7987 mutex_unlock(&dev_priv->rps.hw_lock);
7988 }
7989
7990 static void __intel_autoenable_gt_powersave(struct work_struct *work)
7991 {
7992 struct drm_i915_private *dev_priv =
7993 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
7994 struct intel_engine_cs *rcs;
7995 struct drm_i915_gem_request *req;
7996
7997 if (READ_ONCE(dev_priv->rps.enabled))
7998 goto out;
7999
8000 rcs = dev_priv->engine[RCS];
8001 if (rcs->last_retired_context)
8002 goto out;
8003
8004 if (!rcs->init_context)
8005 goto out;
8006
8007 mutex_lock(&dev_priv->drm.struct_mutex);
8008
8009 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
8010 if (IS_ERR(req))
8011 goto unlock;
8012
8013 if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
8014 rcs->init_context(req);
8015
8016 /* Mark the device busy, calling intel_enable_gt_powersave() */
8017 i915_add_request(req);
8018
8019 unlock:
8020 mutex_unlock(&dev_priv->drm.struct_mutex);
8021 out:
8022 intel_runtime_pm_put(dev_priv);
8023 }
8024
8025 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
8026 {
8027 if (READ_ONCE(dev_priv->rps.enabled))
8028 return;
8029
8030 if (IS_IRONLAKE_M(dev_priv)) {
8031 ironlake_enable_drps(dev_priv);
8032 intel_init_emon(dev_priv);
8033 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
8034 /*
8035 * PCU communication is slow and this doesn't need to be
8036 * done at any specific time, so do this out of our fast path
8037 * to make resume and init faster.
8038 *
8039 * We depend on the HW RC6 power context save/restore
8040 * mechanism when entering D3 through runtime PM suspend. So
8041 * disable RPM until RPS/RC6 is properly setup. We can only
8042 * get here via the driver load/system resume/runtime resume
8043 * paths, so the _noresume version is enough (and in case of
8044 * runtime resume it's necessary).
8045 */
8046 if (queue_delayed_work(dev_priv->wq,
8047 &dev_priv->rps.autoenable_work,
8048 round_jiffies_up_relative(HZ)))
8049 intel_runtime_pm_get_noresume(dev_priv);
8050 }
8051 }
8052
8053 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
8054 {
8055 /*
8056 * On Ibex Peak and Cougar Point, we need to disable clock
8057 * gating for the panel power sequencer or it will fail to
8058 * start up when no ports are active.
8059 */
8060 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8061 }
8062
8063 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
8064 {
8065 enum pipe pipe;
8066
8067 for_each_pipe(dev_priv, pipe) {
8068 I915_WRITE(DSPCNTR(pipe),
8069 I915_READ(DSPCNTR(pipe)) |
8070 DISPPLANE_TRICKLE_FEED_DISABLE);
8071
8072 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
8073 POSTING_READ(DSPSURF(pipe));
8074 }
8075 }
8076
8077 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
8078 {
8079 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
8080 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
8081 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
8082
8083 /*
8084 * Don't touch WM1S_LP_EN here.
8085 * Doing so could cause underruns.
8086 */
8087 }
8088
8089 static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
8090 {
8091 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
8092
8093 /*
8094 * Required for FBC
8095 * WaFbcDisableDpfcClockGating:ilk
8096 */
8097 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
8098 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
8099 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
8100
8101 I915_WRITE(PCH_3DCGDIS0,
8102 MARIUNIT_CLOCK_GATE_DISABLE |
8103 SVSMUNIT_CLOCK_GATE_DISABLE);
8104 I915_WRITE(PCH_3DCGDIS1,
8105 VFMUNIT_CLOCK_GATE_DISABLE);
8106
8107 /*
8108 * According to the spec the following bits should be set in
8109 * order to enable memory self-refresh
8110 * The bit 22/21 of 0x42004
8111 * The bit 5 of 0x42020
8112 * The bit 15 of 0x45000
8113 */
8114 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8115 (I915_READ(ILK_DISPLAY_CHICKEN2) |
8116 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8117 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
8118 I915_WRITE(DISP_ARB_CTL,
8119 (I915_READ(DISP_ARB_CTL) |
8120 DISP_FBC_WM_DIS));
8121
8122 ilk_init_lp_watermarks(dev_priv);
8123
8124 /*
8125 * Based on the document from hardware guys the following bits
8126 * should be set unconditionally in order to enable FBC.
8127 * The bit 22 of 0x42000
8128 * The bit 22 of 0x42004
8129 * The bit 7,8,9 of 0x42020.
8130 */
8131 if (IS_IRONLAKE_M(dev_priv)) {
8132 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
8133 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8134 I915_READ(ILK_DISPLAY_CHICKEN1) |
8135 ILK_FBCQ_DIS);
8136 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8137 I915_READ(ILK_DISPLAY_CHICKEN2) |
8138 ILK_DPARB_GATE);
8139 }
8140
8141 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
8142
8143 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8144 I915_READ(ILK_DISPLAY_CHICKEN2) |
8145 ILK_ELPIN_409_SELECT);
8146 I915_WRITE(_3D_CHICKEN2,
8147 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8148 _3D_CHICKEN2_WM_READ_PIPELINED);
8149
8150 /* WaDisableRenderCachePipelinedFlush:ilk */
8151 I915_WRITE(CACHE_MODE_0,
8152 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
8153
8154 /* WaDisable_RenderCache_OperationalFlush:ilk */
8155 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8156
8157 g4x_disable_trickle_feed(dev_priv);
8158
8159 ibx_init_clock_gating(dev_priv);
8160 }
8161
8162 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
8163 {
8164 int pipe;
8165 uint32_t val;
8166
8167 /*
8168 * On Ibex Peak and Cougar Point, we need to disable clock
8169 * gating for the panel power sequencer or it will fail to
8170 * start up when no ports are active.
8171 */
8172 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
8173 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
8174 PCH_CPUNIT_CLOCK_GATE_DISABLE);
8175 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
8176 DPLS_EDP_PPS_FIX_DIS);
8177 /* The below fixes the weird display corruption, a few pixels shifted
8178 * downward, on (only) LVDS of some HP laptops with IVY.
8179 */
8180 for_each_pipe(dev_priv, pipe) {
8181 val = I915_READ(TRANS_CHICKEN2(pipe));
8182 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
8183 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
8184 if (dev_priv->vbt.fdi_rx_polarity_inverted)
8185 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
8186 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
8187 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
8188 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
8189 I915_WRITE(TRANS_CHICKEN2(pipe), val);
8190 }
8191 /* WADP0ClockGatingDisable */
8192 for_each_pipe(dev_priv, pipe) {
8193 I915_WRITE(TRANS_CHICKEN1(pipe),
8194 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
8195 }
8196 }
8197
8198 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
8199 {
8200 uint32_t tmp;
8201
8202 tmp = I915_READ(MCH_SSKPD);
8203 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
8204 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
8205 tmp);
8206 }
8207
8208 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
8209 {
8210 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
8211
8212 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
8213
8214 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8215 I915_READ(ILK_DISPLAY_CHICKEN2) |
8216 ILK_ELPIN_409_SELECT);
8217
8218 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
8219 I915_WRITE(_3D_CHICKEN,
8220 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
8221
8222 /* WaDisable_RenderCache_OperationalFlush:snb */
8223 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8224
8225 /*
8226 * BSpec recoomends 8x4 when MSAA is used,
8227 * however in practice 16x4 seems fastest.
8228 *
8229 * Note that PS/WM thread counts depend on the WIZ hashing
8230 * disable bit, which we don't touch here, but it's good
8231 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
8232 */
8233 I915_WRITE(GEN6_GT_MODE,
8234 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
8235
8236 ilk_init_lp_watermarks(dev_priv);
8237
8238 I915_WRITE(CACHE_MODE_0,
8239 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
8240
8241 I915_WRITE(GEN6_UCGCTL1,
8242 I915_READ(GEN6_UCGCTL1) |
8243 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
8244 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
8245
8246 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8247 * gating disable must be set. Failure to set it results in
8248 * flickering pixels due to Z write ordering failures after
8249 * some amount of runtime in the Mesa "fire" demo, and Unigine
8250 * Sanctuary and Tropics, and apparently anything else with
8251 * alpha test or pixel discard.
8252 *
8253 * According to the spec, bit 11 (RCCUNIT) must also be set,
8254 * but we didn't debug actual testcases to find it out.
8255 *
8256 * WaDisableRCCUnitClockGating:snb
8257 * WaDisableRCPBUnitClockGating:snb
8258 */
8259 I915_WRITE(GEN6_UCGCTL2,
8260 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8261 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8262
8263 /* WaStripsFansDisableFastClipPerformanceFix:snb */
8264 I915_WRITE(_3D_CHICKEN3,
8265 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
8266
8267 /*
8268 * Bspec says:
8269 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
8270 * 3DSTATE_SF number of SF output attributes is more than 16."
8271 */
8272 I915_WRITE(_3D_CHICKEN3,
8273 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
8274
8275 /*
8276 * According to the spec the following bits should be
8277 * set in order to enable memory self-refresh and fbc:
8278 * The bit21 and bit22 of 0x42000
8279 * The bit21 and bit22 of 0x42004
8280 * The bit5 and bit7 of 0x42020
8281 * The bit14 of 0x70180
8282 * The bit14 of 0x71180
8283 *
8284 * WaFbcAsynchFlipDisableFbcQueue:snb
8285 */
8286 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8287 I915_READ(ILK_DISPLAY_CHICKEN1) |
8288 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8289 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8290 I915_READ(ILK_DISPLAY_CHICKEN2) |
8291 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8292 I915_WRITE(ILK_DSPCLK_GATE_D,
8293 I915_READ(ILK_DSPCLK_GATE_D) |
8294 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
8295 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
8296
8297 g4x_disable_trickle_feed(dev_priv);
8298
8299 cpt_init_clock_gating(dev_priv);
8300
8301 gen6_check_mch_setup(dev_priv);
8302 }
8303
8304 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
8305 {
8306 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
8307
8308 /*
8309 * WaVSThreadDispatchOverride:ivb,vlv
8310 *
8311 * This actually overrides the dispatch
8312 * mode for all thread types.
8313 */
8314 reg &= ~GEN7_FF_SCHED_MASK;
8315 reg |= GEN7_FF_TS_SCHED_HW;
8316 reg |= GEN7_FF_VS_SCHED_HW;
8317 reg |= GEN7_FF_DS_SCHED_HW;
8318
8319 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
8320 }
8321
8322 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
8323 {
8324 /*
8325 * TODO: this bit should only be enabled when really needed, then
8326 * disabled when not needed anymore in order to save power.
8327 */
8328 if (HAS_PCH_LPT_LP(dev_priv))
8329 I915_WRITE(SOUTH_DSPCLK_GATE_D,
8330 I915_READ(SOUTH_DSPCLK_GATE_D) |
8331 PCH_LP_PARTITION_LEVEL_DISABLE);
8332
8333 /* WADPOClockGatingDisable:hsw */
8334 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
8335 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
8336 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
8337 }
8338
8339 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
8340 {
8341 if (HAS_PCH_LPT_LP(dev_priv)) {
8342 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
8343
8344 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
8345 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8346 }
8347 }
8348
8349 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
8350 int general_prio_credits,
8351 int high_prio_credits)
8352 {
8353 u32 misccpctl;
8354
8355 /* WaTempDisableDOPClkGating:bdw */
8356 misccpctl = I915_READ(GEN7_MISCCPCTL);
8357 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
8358
8359 I915_WRITE(GEN8_L3SQCREG1,
8360 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
8361 L3_HIGH_PRIO_CREDITS(high_prio_credits));
8362
8363 /*
8364 * Wait at least 100 clocks before re-enabling clock gating.
8365 * See the definition of L3SQCREG1 in BSpec.
8366 */
8367 POSTING_READ(GEN8_L3SQCREG1);
8368 udelay(1);
8369 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
8370 }
8371
8372 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
8373 {
8374 if (!HAS_PCH_CNP(dev_priv))
8375 return;
8376
8377 /* Wa #1181 */
8378 I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
8379 CNP_PWM_CGE_GATING_DISABLE);
8380 }
8381
8382 static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
8383 {
8384 u32 val;
8385 cnp_init_clock_gating(dev_priv);
8386
8387 /* This is not an Wa. Enable for better image quality */
8388 I915_WRITE(_3D_CHICKEN3,
8389 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
8390
8391 /* WaEnableChickenDCPR:cnl */
8392 I915_WRITE(GEN8_CHICKEN_DCPR_1,
8393 I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
8394
8395 /* WaFbcWakeMemOn:cnl */
8396 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
8397 DISP_FBC_MEMORY_WAKE);
8398
8399 /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
8400 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
8401 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
8402 I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
8403 SARBUNIT_CLKGATE_DIS);
8404
8405 /* Display WA #1133: WaFbcSkipSegments:cnl */
8406 val = I915_READ(ILK_DPFC_CHICKEN);
8407 val &= ~GLK_SKIP_SEG_COUNT_MASK;
8408 val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
8409 I915_WRITE(ILK_DPFC_CHICKEN, val);
8410 }
8411
8412 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
8413 {
8414 cnp_init_clock_gating(dev_priv);
8415 gen9_init_clock_gating(dev_priv);
8416
8417 /* WaFbcNukeOnHostModify:cfl */
8418 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
8419 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
8420 }
8421
8422 static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
8423 {
8424 gen9_init_clock_gating(dev_priv);
8425
8426 /* WaDisableSDEUnitClockGating:kbl */
8427 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
8428 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
8429 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
8430
8431 /* WaDisableGamClockGating:kbl */
8432 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
8433 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
8434 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
8435
8436 /* WaFbcNukeOnHostModify:kbl */
8437 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
8438 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
8439 }
8440
8441 static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
8442 {
8443 gen9_init_clock_gating(dev_priv);
8444
8445 /* WAC6entrylatency:skl */
8446 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
8447 FBC_LLC_FULLY_OPEN);
8448
8449 /* WaFbcNukeOnHostModify:skl */
8450 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
8451 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
8452 }
8453
8454 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
8455 {
8456 enum pipe pipe;
8457
8458 ilk_init_lp_watermarks(dev_priv);
8459
8460 /* WaSwitchSolVfFArbitrationPriority:bdw */
8461 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
8462
8463 /* WaPsrDPAMaskVBlankInSRD:bdw */
8464 I915_WRITE(CHICKEN_PAR1_1,
8465 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
8466
8467 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
8468 for_each_pipe(dev_priv, pipe) {
8469 I915_WRITE(CHICKEN_PIPESL_1(pipe),
8470 I915_READ(CHICKEN_PIPESL_1(pipe)) |
8471 BDW_DPRS_MASK_VBLANK_SRD);
8472 }
8473
8474 /* WaVSRefCountFullforceMissDisable:bdw */
8475 /* WaDSRefCountFullforceMissDisable:bdw */
8476 I915_WRITE(GEN7_FF_THREAD_MODE,
8477 I915_READ(GEN7_FF_THREAD_MODE) &
8478 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
8479
8480 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
8481 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
8482
8483 /* WaDisableSDEUnitClockGating:bdw */
8484 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
8485 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
8486
8487 /* WaProgramL3SqcReg1Default:bdw */
8488 gen8_set_l3sqc_credits(dev_priv, 30, 2);
8489
8490 /*
8491 * WaGttCachingOffByDefault:bdw
8492 * GTT cache may not work with big pages, so if those
8493 * are ever enabled GTT cache may need to be disabled.
8494 */
8495 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
8496
8497 /* WaKVMNotificationOnConfigChange:bdw */
8498 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
8499 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
8500
8501 lpt_init_clock_gating(dev_priv);
8502
8503 /* WaDisableDopClockGating:bdw
8504 *
8505 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
8506 * clock gating.
8507 */
8508 I915_WRITE(GEN6_UCGCTL1,
8509 I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
8510 }
8511
8512 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
8513 {
8514 ilk_init_lp_watermarks(dev_priv);
8515
8516 /* L3 caching of data atomics doesn't work -- disable it. */
8517 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
8518 I915_WRITE(HSW_ROW_CHICKEN3,
8519 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
8520
8521 /* This is required by WaCatErrorRejectionIssue:hsw */
8522 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8523 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8524 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8525
8526 /* WaVSRefCountFullforceMissDisable:hsw */
8527 I915_WRITE(GEN7_FF_THREAD_MODE,
8528 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
8529
8530 /* WaDisable_RenderCache_OperationalFlush:hsw */
8531 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8532
8533 /* enable HiZ Raw Stall Optimization */
8534 I915_WRITE(CACHE_MODE_0_GEN7,
8535 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
8536
8537 /* WaDisable4x2SubspanOptimization:hsw */
8538 I915_WRITE(CACHE_MODE_1,
8539 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
8540
8541 /*
8542 * BSpec recommends 8x4 when MSAA is used,
8543 * however in practice 16x4 seems fastest.
8544 *
8545 * Note that PS/WM thread counts depend on the WIZ hashing
8546 * disable bit, which we don't touch here, but it's good
8547 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
8548 */
8549 I915_WRITE(GEN7_GT_MODE,
8550 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
8551
8552 /* WaSampleCChickenBitEnable:hsw */
8553 I915_WRITE(HALF_SLICE_CHICKEN3,
8554 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
8555
8556 /* WaSwitchSolVfFArbitrationPriority:hsw */
8557 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
8558
8559 /* WaRsPkgCStateDisplayPMReq:hsw */
8560 I915_WRITE(CHICKEN_PAR1_1,
8561 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
8562
8563 lpt_init_clock_gating(dev_priv);
8564 }
8565
8566 static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
8567 {
8568 uint32_t snpcr;
8569
8570 ilk_init_lp_watermarks(dev_priv);
8571
8572 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
8573
8574 /* WaDisableEarlyCull:ivb */
8575 I915_WRITE(_3D_CHICKEN3,
8576 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
8577
8578 /* WaDisableBackToBackFlipFix:ivb */
8579 I915_WRITE(IVB_CHICKEN3,
8580 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8581 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8582
8583 /* WaDisablePSDDualDispatchEnable:ivb */
8584 if (IS_IVB_GT1(dev_priv))
8585 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
8586 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
8587
8588 /* WaDisable_RenderCache_OperationalFlush:ivb */
8589 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8590
8591 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
8592 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8593 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8594
8595 /* WaApplyL3ControlAndL3ChickenMode:ivb */
8596 I915_WRITE(GEN7_L3CNTLREG1,
8597 GEN7_WA_FOR_GEN7_L3_CONTROL);
8598 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8599 GEN7_WA_L3_CHICKEN_MODE);
8600 if (IS_IVB_GT1(dev_priv))
8601 I915_WRITE(GEN7_ROW_CHICKEN2,
8602 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
8603 else {
8604 /* must write both registers */
8605 I915_WRITE(GEN7_ROW_CHICKEN2,
8606 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
8607 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
8608 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
8609 }
8610
8611 /* WaForceL3Serialization:ivb */
8612 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
8613 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
8614
8615 /*
8616 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8617 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
8618 */
8619 I915_WRITE(GEN6_UCGCTL2,
8620 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8621
8622 /* This is required by WaCatErrorRejectionIssue:ivb */
8623 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8624 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8625 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8626
8627 g4x_disable_trickle_feed(dev_priv);
8628
8629 gen7_setup_fixed_func_scheduler(dev_priv);
8630
8631 if (0) { /* causes HiZ corruption on ivb:gt1 */
8632 /* enable HiZ Raw Stall Optimization */
8633 I915_WRITE(CACHE_MODE_0_GEN7,
8634 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
8635 }
8636
8637 /* WaDisable4x2SubspanOptimization:ivb */
8638 I915_WRITE(CACHE_MODE_1,
8639 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
8640
8641 /*
8642 * BSpec recommends 8x4 when MSAA is used,
8643 * however in practice 16x4 seems fastest.
8644 *
8645 * Note that PS/WM thread counts depend on the WIZ hashing
8646 * disable bit, which we don't touch here, but it's good
8647 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
8648 */
8649 I915_WRITE(GEN7_GT_MODE,
8650 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
8651
8652 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
8653 snpcr &= ~GEN6_MBC_SNPCR_MASK;
8654 snpcr |= GEN6_MBC_SNPCR_MED;
8655 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
8656
8657 if (!HAS_PCH_NOP(dev_priv))
8658 cpt_init_clock_gating(dev_priv);
8659
8660 gen6_check_mch_setup(dev_priv);
8661 }
8662
8663 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
8664 {
8665 /* WaDisableEarlyCull:vlv */
8666 I915_WRITE(_3D_CHICKEN3,
8667 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
8668
8669 /* WaDisableBackToBackFlipFix:vlv */
8670 I915_WRITE(IVB_CHICKEN3,
8671 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8672 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8673
8674 /* WaPsdDispatchEnable:vlv */
8675 /* WaDisablePSDDualDispatchEnable:vlv */
8676 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
8677 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
8678 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
8679
8680 /* WaDisable_RenderCache_OperationalFlush:vlv */
8681 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8682
8683 /* WaForceL3Serialization:vlv */
8684 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
8685 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
8686
8687 /* WaDisableDopClockGating:vlv */
8688 I915_WRITE(GEN7_ROW_CHICKEN2,
8689 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
8690
8691 /* This is required by WaCatErrorRejectionIssue:vlv */
8692 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8693 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8694 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8695
8696 gen7_setup_fixed_func_scheduler(dev_priv);
8697
8698 /*
8699 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8700 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
8701 */
8702 I915_WRITE(GEN6_UCGCTL2,
8703 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8704
8705 /* WaDisableL3Bank2xClockGate:vlv
8706 * Disabling L3 clock gating- MMIO 940c[25] = 1
8707 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
8708 I915_WRITE(GEN7_UCGCTL4,
8709 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
8710
8711 /*
8712 * BSpec says this must be set, even though
8713 * WaDisable4x2SubspanOptimization isn't listed for VLV.
8714 */
8715 I915_WRITE(CACHE_MODE_1,
8716 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
8717
8718 /*
8719 * BSpec recommends 8x4 when MSAA is used,
8720 * however in practice 16x4 seems fastest.
8721 *
8722 * Note that PS/WM thread counts depend on the WIZ hashing
8723 * disable bit, which we don't touch here, but it's good
8724 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
8725 */
8726 I915_WRITE(GEN7_GT_MODE,
8727 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
8728
8729 /*
8730 * WaIncreaseL3CreditsForVLVB0:vlv
8731 * This is the hardware default actually.
8732 */
8733 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
8734
8735 /*
8736 * WaDisableVLVClockGating_VBIIssue:vlv
8737 * Disable clock gating on th GCFG unit to prevent a delay
8738 * in the reporting of vblank events.
8739 */
8740 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
8741 }
8742
8743 static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
8744 {
8745 /* WaVSRefCountFullforceMissDisable:chv */
8746 /* WaDSRefCountFullforceMissDisable:chv */
8747 I915_WRITE(GEN7_FF_THREAD_MODE,
8748 I915_READ(GEN7_FF_THREAD_MODE) &
8749 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
8750
8751 /* WaDisableSemaphoreAndSyncFlipWait:chv */
8752 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
8753 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
8754
8755 /* WaDisableCSUnitClockGating:chv */
8756 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
8757 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
8758
8759 /* WaDisableSDEUnitClockGating:chv */
8760 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
8761 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
8762
8763 /*
8764 * WaProgramL3SqcReg1Default:chv
8765 * See gfxspecs/Related Documents/Performance Guide/
8766 * LSQC Setting Recommendations.
8767 */
8768 gen8_set_l3sqc_credits(dev_priv, 38, 2);
8769
8770 /*
8771 * GTT cache may not work with big pages, so if those
8772 * are ever enabled GTT cache may need to be disabled.
8773 */
8774 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
8775 }
8776
8777 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
8778 {
8779 uint32_t dspclk_gate;
8780
8781 I915_WRITE(RENCLK_GATE_D1, 0);
8782 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8783 GS_UNIT_CLOCK_GATE_DISABLE |
8784 CL_UNIT_CLOCK_GATE_DISABLE);
8785 I915_WRITE(RAMCLK_GATE_D, 0);
8786 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8787 OVRUNIT_CLOCK_GATE_DISABLE |
8788 OVCUNIT_CLOCK_GATE_DISABLE;
8789 if (IS_GM45(dev_priv))
8790 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8791 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8792
8793 /* WaDisableRenderCachePipelinedFlush */
8794 I915_WRITE(CACHE_MODE_0,
8795 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
8796
8797 /* WaDisable_RenderCache_OperationalFlush:g4x */
8798 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8799
8800 g4x_disable_trickle_feed(dev_priv);
8801 }
8802
8803 static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
8804 {
8805 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
8806 I915_WRITE(RENCLK_GATE_D2, 0);
8807 I915_WRITE(DSPCLK_GATE_D, 0);
8808 I915_WRITE(RAMCLK_GATE_D, 0);
8809 I915_WRITE16(DEUC, 0);
8810 I915_WRITE(MI_ARB_STATE,
8811 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
8812
8813 /* WaDisable_RenderCache_OperationalFlush:gen4 */
8814 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8815 }
8816
8817 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
8818 {
8819 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
8820 I965_RCC_CLOCK_GATE_DISABLE |
8821 I965_RCPB_CLOCK_GATE_DISABLE |
8822 I965_ISC_CLOCK_GATE_DISABLE |
8823 I965_FBC_CLOCK_GATE_DISABLE);
8824 I915_WRITE(RENCLK_GATE_D2, 0);
8825 I915_WRITE(MI_ARB_STATE,
8826 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
8827
8828 /* WaDisable_RenderCache_OperationalFlush:gen4 */
8829 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8830 }
8831
8832 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
8833 {
8834 u32 dstate = I915_READ(D_STATE);
8835
8836 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8837 DSTATE_DOT_CLOCK_GATING;
8838 I915_WRITE(D_STATE, dstate);
8839
8840 if (IS_PINEVIEW(dev_priv))
8841 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
8842
8843 /* IIR "flip pending" means done if this bit is set */
8844 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
8845
8846 /* interrupts should cause a wake up from C3 */
8847 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
8848
8849 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
8850 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
8851
8852 I915_WRITE(MI_ARB_STATE,
8853 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
8854 }
8855
8856 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
8857 {
8858 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8859
8860 /* interrupts should cause a wake up from C3 */
8861 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
8862 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
8863
8864 I915_WRITE(MEM_MODE,
8865 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
8866 }
8867
8868 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
8869 {
8870 I915_WRITE(MEM_MODE,
8871 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
8872 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
8873 }
8874
8875 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
8876 {
8877 dev_priv->display.init_clock_gating(dev_priv);
8878 }
8879
8880 void intel_suspend_hw(struct drm_i915_private *dev_priv)
8881 {
8882 if (HAS_PCH_LPT(dev_priv))
8883 lpt_suspend_hw(dev_priv);
8884 }
8885
8886 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
8887 {
8888 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
8889 }
8890
8891 /**
8892 * intel_init_clock_gating_hooks - setup the clock gating hooks
8893 * @dev_priv: device private
8894 *
8895 * Setup the hooks that configure which clocks of a given platform can be
8896 * gated and also apply various GT and display specific workarounds for these
8897 * platforms. Note that some GT specific workarounds are applied separately
8898 * when GPU contexts or batchbuffers start their execution.
8899 */
8900 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
8901 {
8902 if (IS_CANNONLAKE(dev_priv))
8903 dev_priv->display.init_clock_gating = cnl_init_clock_gating;
8904 else if (IS_COFFEELAKE(dev_priv))
8905 dev_priv->display.init_clock_gating = cfl_init_clock_gating;
8906 else if (IS_SKYLAKE(dev_priv))
8907 dev_priv->display.init_clock_gating = skl_init_clock_gating;
8908 else if (IS_KABYLAKE(dev_priv))
8909 dev_priv->display.init_clock_gating = kbl_init_clock_gating;
8910 else if (IS_BROXTON(dev_priv))
8911 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
8912 else if (IS_GEMINILAKE(dev_priv))
8913 dev_priv->display.init_clock_gating = glk_init_clock_gating;
8914 else if (IS_BROADWELL(dev_priv))
8915 dev_priv->display.init_clock_gating = bdw_init_clock_gating;
8916 else if (IS_CHERRYVIEW(dev_priv))
8917 dev_priv->display.init_clock_gating = chv_init_clock_gating;
8918 else if (IS_HASWELL(dev_priv))
8919 dev_priv->display.init_clock_gating = hsw_init_clock_gating;
8920 else if (IS_IVYBRIDGE(dev_priv))
8921 dev_priv->display.init_clock_gating = ivb_init_clock_gating;
8922 else if (IS_VALLEYVIEW(dev_priv))
8923 dev_priv->display.init_clock_gating = vlv_init_clock_gating;
8924 else if (IS_GEN6(dev_priv))
8925 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
8926 else if (IS_GEN5(dev_priv))
8927 dev_priv->display.init_clock_gating = ilk_init_clock_gating;
8928 else if (IS_G4X(dev_priv))
8929 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
8930 else if (IS_I965GM(dev_priv))
8931 dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
8932 else if (IS_I965G(dev_priv))
8933 dev_priv->display.init_clock_gating = i965g_init_clock_gating;
8934 else if (IS_GEN3(dev_priv))
8935 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
8936 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
8937 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
8938 else if (IS_GEN2(dev_priv))
8939 dev_priv->display.init_clock_gating = i830_init_clock_gating;
8940 else {
8941 MISSING_CASE(INTEL_DEVID(dev_priv));
8942 dev_priv->display.init_clock_gating = nop_init_clock_gating;
8943 }
8944 }
8945
8946 /* Set up chip specific power management-related functions */
8947 void intel_init_pm(struct drm_i915_private *dev_priv)
8948 {
8949 intel_fbc_init(dev_priv);
8950
8951 /* For cxsr */
8952 if (IS_PINEVIEW(dev_priv))
8953 i915_pineview_get_mem_freq(dev_priv);
8954 else if (IS_GEN5(dev_priv))
8955 i915_ironlake_get_mem_freq(dev_priv);
8956
8957 /* For FIFO watermark updates */
8958 if (INTEL_GEN(dev_priv) >= 9) {
8959 skl_setup_wm_latency(dev_priv);
8960 dev_priv->display.initial_watermarks = skl_initial_wm;
8961 dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
8962 dev_priv->display.compute_global_watermarks = skl_compute_wm;
8963 } else if (HAS_PCH_SPLIT(dev_priv)) {
8964 ilk_setup_wm_latency(dev_priv);
8965
8966 if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
8967 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
8968 (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
8969 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
8970 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
8971 dev_priv->display.compute_intermediate_wm =
8972 ilk_compute_intermediate_wm;
8973 dev_priv->display.initial_watermarks =
8974 ilk_initial_watermarks;
8975 dev_priv->display.optimize_watermarks =
8976 ilk_optimize_watermarks;
8977 } else {
8978 DRM_DEBUG_KMS("Failed to read display plane latency. "
8979 "Disable CxSR\n");
8980 }
8981 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8982 vlv_setup_wm_latency(dev_priv);
8983 dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
8984 dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
8985 dev_priv->display.initial_watermarks = vlv_initial_watermarks;
8986 dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
8987 dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
8988 } else if (IS_G4X(dev_priv)) {
8989 g4x_setup_wm_latency(dev_priv);
8990 dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
8991 dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
8992 dev_priv->display.initial_watermarks = g4x_initial_watermarks;
8993 dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
8994 } else if (IS_PINEVIEW(dev_priv)) {
8995 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
8996 dev_priv->is_ddr3,
8997 dev_priv->fsb_freq,
8998 dev_priv->mem_freq)) {
8999 DRM_INFO("failed to find known CxSR latency "
9000 "(found ddr%s fsb freq %d, mem freq %d), "
9001 "disabling CxSR\n",
9002 (dev_priv->is_ddr3 == 1) ? "3" : "2",
9003 dev_priv->fsb_freq, dev_priv->mem_freq);
9004 /* Disable CxSR and never update its watermark again */
9005 intel_set_memory_cxsr(dev_priv, false);
9006 dev_priv->display.update_wm = NULL;
9007 } else
9008 dev_priv->display.update_wm = pineview_update_wm;
9009 } else if (IS_GEN4(dev_priv)) {
9010 dev_priv->display.update_wm = i965_update_wm;
9011 } else if (IS_GEN3(dev_priv)) {
9012 dev_priv->display.update_wm = i9xx_update_wm;
9013 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
9014 } else if (IS_GEN2(dev_priv)) {
9015 if (INTEL_INFO(dev_priv)->num_pipes == 1) {
9016 dev_priv->display.update_wm = i845_update_wm;
9017 dev_priv->display.get_fifo_size = i845_get_fifo_size;
9018 } else {
9019 dev_priv->display.update_wm = i9xx_update_wm;
9020 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9021 }
9022 } else {
9023 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
9024 }
9025 }
9026
9027 static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
9028 {
9029 uint32_t flags =
9030 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
9031
9032 switch (flags) {
9033 case GEN6_PCODE_SUCCESS:
9034 return 0;
9035 case GEN6_PCODE_UNIMPLEMENTED_CMD:
9036 return -ENODEV;
9037 case GEN6_PCODE_ILLEGAL_CMD:
9038 return -ENXIO;
9039 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
9040 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
9041 return -EOVERFLOW;
9042 case GEN6_PCODE_TIMEOUT:
9043 return -ETIMEDOUT;
9044 default:
9045 MISSING_CASE(flags);
9046 return 0;
9047 }
9048 }
9049
9050 static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
9051 {
9052 uint32_t flags =
9053 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
9054
9055 switch (flags) {
9056 case GEN6_PCODE_SUCCESS:
9057 return 0;
9058 case GEN6_PCODE_ILLEGAL_CMD:
9059 return -ENXIO;
9060 case GEN7_PCODE_TIMEOUT:
9061 return -ETIMEDOUT;
9062 case GEN7_PCODE_ILLEGAL_DATA:
9063 return -EINVAL;
9064 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
9065 return -EOVERFLOW;
9066 default:
9067 MISSING_CASE(flags);
9068 return 0;
9069 }
9070 }
9071
9072 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
9073 {
9074 int status;
9075
9076 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
9077
9078 /* GEN6_PCODE_* are outside of the forcewake domain, we can
9079 * use te fw I915_READ variants to reduce the amount of work
9080 * required when reading/writing.
9081 */
9082
9083 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
9084 DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
9085 mbox, __builtin_return_address(0));
9086 return -EAGAIN;
9087 }
9088
9089 I915_WRITE_FW(GEN6_PCODE_DATA, *val);
9090 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
9091 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
9092
9093 if (__intel_wait_for_register_fw(dev_priv,
9094 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
9095 500, 0, NULL)) {
9096 DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
9097 mbox, __builtin_return_address(0));
9098 return -ETIMEDOUT;
9099 }
9100
9101 *val = I915_READ_FW(GEN6_PCODE_DATA);
9102 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
9103
9104 if (INTEL_GEN(dev_priv) > 6)
9105 status = gen7_check_mailbox_status(dev_priv);
9106 else
9107 status = gen6_check_mailbox_status(dev_priv);
9108
9109 if (status) {
9110 DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
9111 mbox, __builtin_return_address(0), status);
9112 return status;
9113 }
9114
9115 return 0;
9116 }
9117
9118 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
9119 u32 mbox, u32 val)
9120 {
9121 int status;
9122
9123 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
9124
9125 /* GEN6_PCODE_* are outside of the forcewake domain, we can
9126 * use te fw I915_READ variants to reduce the amount of work
9127 * required when reading/writing.
9128 */
9129
9130 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
9131 DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
9132 val, mbox, __builtin_return_address(0));
9133 return -EAGAIN;
9134 }
9135
9136 I915_WRITE_FW(GEN6_PCODE_DATA, val);
9137 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
9138 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
9139
9140 if (__intel_wait_for_register_fw(dev_priv,
9141 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
9142 500, 0, NULL)) {
9143 DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
9144 val, mbox, __builtin_return_address(0));
9145 return -ETIMEDOUT;
9146 }
9147
9148 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
9149
9150 if (INTEL_GEN(dev_priv) > 6)
9151 status = gen7_check_mailbox_status(dev_priv);
9152 else
9153 status = gen6_check_mailbox_status(dev_priv);
9154
9155 if (status) {
9156 DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
9157 val, mbox, __builtin_return_address(0), status);
9158 return status;
9159 }
9160
9161 return 0;
9162 }
9163
9164 static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
9165 u32 request, u32 reply_mask, u32 reply,
9166 u32 *status)
9167 {
9168 u32 val = request;
9169
9170 *status = sandybridge_pcode_read(dev_priv, mbox, &val);
9171
9172 return *status || ((val & reply_mask) == reply);
9173 }
9174
9175 /**
9176 * skl_pcode_request - send PCODE request until acknowledgment
9177 * @dev_priv: device private
9178 * @mbox: PCODE mailbox ID the request is targeted for
9179 * @request: request ID
9180 * @reply_mask: mask used to check for request acknowledgment
9181 * @reply: value used to check for request acknowledgment
9182 * @timeout_base_ms: timeout for polling with preemption enabled
9183 *
9184 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
9185 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
9186 * The request is acknowledged once the PCODE reply dword equals @reply after
9187 * applying @reply_mask. Polling is first attempted with preemption enabled
9188 * for @timeout_base_ms and if this times out for another 50 ms with
9189 * preemption disabled.
9190 *
9191 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
9192 * other error as reported by PCODE.
9193 */
9194 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
9195 u32 reply_mask, u32 reply, int timeout_base_ms)
9196 {
9197 u32 status;
9198 int ret;
9199
9200 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
9201
9202 #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
9203 &status)
9204
9205 /*
9206 * Prime the PCODE by doing a request first. Normally it guarantees
9207 * that a subsequent request, at most @timeout_base_ms later, succeeds.
9208 * _wait_for() doesn't guarantee when its passed condition is evaluated
9209 * first, so send the first request explicitly.
9210 */
9211 if (COND) {
9212 ret = 0;
9213 goto out;
9214 }
9215 ret = _wait_for(COND, timeout_base_ms * 1000, 10);
9216 if (!ret)
9217 goto out;
9218
9219 /*
9220 * The above can time out if the number of requests was low (2 in the
9221 * worst case) _and_ PCODE was busy for some reason even after a
9222 * (queued) request and @timeout_base_ms delay. As a workaround retry
9223 * the poll with preemption disabled to maximize the number of
9224 * requests. Increase the timeout from @timeout_base_ms to 50ms to
9225 * account for interrupts that could reduce the number of these
9226 * requests, and for any quirks of the PCODE firmware that delays
9227 * the request completion.
9228 */
9229 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
9230 WARN_ON_ONCE(timeout_base_ms > 3);
9231 preempt_disable();
9232 ret = wait_for_atomic(COND, 50);
9233 preempt_enable();
9234
9235 out:
9236 return ret ? ret : status;
9237 #undef COND
9238 }
9239
9240 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
9241 {
9242 /*
9243 * N = val - 0xb7
9244 * Slow = Fast = GPLL ref * N
9245 */
9246 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
9247 }
9248
9249 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
9250 {
9251 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
9252 }
9253
9254 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
9255 {
9256 /*
9257 * N = val / 2
9258 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
9259 */
9260 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
9261 }
9262
9263 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
9264 {
9265 /* CHV needs even values */
9266 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
9267 }
9268
9269 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
9270 {
9271 if (INTEL_GEN(dev_priv) >= 9)
9272 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
9273 GEN9_FREQ_SCALER);
9274 else if (IS_CHERRYVIEW(dev_priv))
9275 return chv_gpu_freq(dev_priv, val);
9276 else if (IS_VALLEYVIEW(dev_priv))
9277 return byt_gpu_freq(dev_priv, val);
9278 else
9279 return val * GT_FREQUENCY_MULTIPLIER;
9280 }
9281
9282 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
9283 {
9284 if (INTEL_GEN(dev_priv) >= 9)
9285 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
9286 GT_FREQUENCY_MULTIPLIER);
9287 else if (IS_CHERRYVIEW(dev_priv))
9288 return chv_freq_opcode(dev_priv, val);
9289 else if (IS_VALLEYVIEW(dev_priv))
9290 return byt_freq_opcode(dev_priv, val);
9291 else
9292 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
9293 }
9294
9295 void intel_pm_setup(struct drm_i915_private *dev_priv)
9296 {
9297 mutex_init(&dev_priv->rps.hw_lock);
9298
9299 INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
9300 __intel_autoenable_gt_powersave);
9301 atomic_set(&dev_priv->rps.num_waiters, 0);
9302
9303 dev_priv->pm.suspended = false;
9304 atomic_set(&dev_priv->pm.wakeref_count, 0);
9305 }
9306
9307 static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
9308 const i915_reg_t reg)
9309 {
9310 u32 lower, upper, tmp;
9311 int loop = 2;
9312
9313 /* The register accessed do not need forcewake. We borrow
9314 * uncore lock to prevent concurrent access to range reg.
9315 */
9316 spin_lock_irq(&dev_priv->uncore.lock);
9317
9318 /* vlv and chv residency counters are 40 bits in width.
9319 * With a control bit, we can choose between upper or lower
9320 * 32bit window into this counter.
9321 *
9322 * Although we always use the counter in high-range mode elsewhere,
9323 * userspace may attempt to read the value before rc6 is initialised,
9324 * before we have set the default VLV_COUNTER_CONTROL value. So always
9325 * set the high bit to be safe.
9326 */
9327 I915_WRITE_FW(VLV_COUNTER_CONTROL,
9328 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
9329 upper = I915_READ_FW(reg);
9330 do {
9331 tmp = upper;
9332
9333 I915_WRITE_FW(VLV_COUNTER_CONTROL,
9334 _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
9335 lower = I915_READ_FW(reg);
9336
9337 I915_WRITE_FW(VLV_COUNTER_CONTROL,
9338 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
9339 upper = I915_READ_FW(reg);
9340 } while (upper != tmp && --loop);
9341
9342 /* Everywhere else we always use VLV_COUNTER_CONTROL with the
9343 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
9344 * now.
9345 */
9346
9347 spin_unlock_irq(&dev_priv->uncore.lock);
9348
9349 return lower | (u64)upper << 8;
9350 }
9351
9352 u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
9353 const i915_reg_t reg)
9354 {
9355 u64 time_hw, units, div;
9356
9357 if (!intel_enable_rc6())
9358 return 0;
9359
9360 intel_runtime_pm_get(dev_priv);
9361
9362 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
9363 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
9364 units = 1000;
9365 div = dev_priv->czclk_freq;
9366
9367 time_hw = vlv_residency_raw(dev_priv, reg);
9368 } else if (IS_GEN9_LP(dev_priv)) {
9369 units = 1000;
9370 div = 1200; /* 833.33ns */
9371
9372 time_hw = I915_READ(reg);
9373 } else {
9374 units = 128000; /* 1.28us */
9375 div = 100000;
9376
9377 time_hw = I915_READ(reg);
9378 }
9379
9380 intel_runtime_pm_put(dev_priv);
9381 return DIV_ROUND_UP_ULL(time_hw * units, div);
9382 }