/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper states
 * save more power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

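/*
 * CxSR latency table. The first four columns are the ones matched by
 * intel_get_cxsr_latency() below: desktop chipset, DDR3, FSB frequency
 * (MHz) and memory frequency (MHz). The remaining four are exit latencies
 * in ns (display/cursor self-refresh, each with an HPLL-off variant); see
 * struct cxsr_latency for the exact field order.
 */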
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

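/*
 * Ask the Punit to allow (enable) or forbid (disable) dropping the DDR
 * frequency while the display runs, then wait for the Punit to ack the
 * request before releasing the lock.
 */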
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

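/*
 * CxSR lets main memory enter self-refresh while the display keeps
 * scanning out from its FIFO, saving power. The enable bit lives in a
 * different register on each platform, hence the per-platform branches
 * below.
 */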
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

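/*
 * The display FIFO is carved up between the planes of a pipe; each plane's
 * start offset is stored as 8 low bits in DSPARB/DSPARB3 plus a 9th high
 * bit in DSPARB2, which VLV_FIFO_START() reassembles.
 */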
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
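	/*
	 * Worked example with assumed numbers: a 100000 kHz pixel clock at
	 * 4 bytes per pixel and the 5000 ns pessimal latency gives
	 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes in flight, i.e.
	 * DIV_ROUND_UP(2000, 64) = 32 entries for a 64-byte cacheline, so
	 * the watermark ends up 32 + guard_size entries below the FIFO top.
	 */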
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

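/*
 * Compute the normal-operation ("WM0") watermarks for one pipe: the plane
 * watermark uses the small buffer method (bytes accumulated during the
 * latency window) while the cursor uses the large buffer method (whole
 * lines fetched during the latency window), matching the inline comments
 * below.
 */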
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

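/*
 * "Method 2": compute how many lines are scanned out during the latency
 * window (plus one), convert that to bytes for this plane's width and cpp,
 * and express the result in 64-byte FIFO cachelines.
 */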
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

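/*
 * Split the 511-entry pipe FIFO between the enabled planes in proportion
 * to their bytes per pixel, hand the cursor its fixed 63 entries, and
 * spread any remainder evenly over the active planes.
 */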
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

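/*
 * The values computed so far count "FIFO entries consumed"; the hardware
 * registers apparently expect the opposite convention, so convert each
 * value to fifo_size minus itself before programming.
 */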
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

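/*
 * Merge the per-pipe watermark state into a single device-level value:
 * use the deepest power-saving level every active pipe supports, and only
 * keep cxsr when exactly one pipe is active.
 */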
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

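/* Exactly one bit set in the mask means exactly one plane is enabled. */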
#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev) || IS_I945GM(dev))
			cpp = 4;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

8cfb3407 1676uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
801bcfff 1677{
fd4daa9c 1678 uint32_t pixel_rate;
801bcfff 1679
8cfb3407 1680 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
801bcfff
PZ
1681
1682 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1683 * adjust the pixel_rate here. */
1684
8cfb3407 1685 if (pipe_config->pch_pfit.enabled) {
801bcfff 1686 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
8cfb3407
VS
1687 uint32_t pfit_size = pipe_config->pch_pfit.size;
1688
1689 pipe_w = pipe_config->pipe_src_w;
1690 pipe_h = pipe_config->pipe_src_h;
801bcfff 1691
801bcfff
PZ
1692 pfit_w = (pfit_size >> 16) & 0xFFFF;
1693 pfit_h = pfit_size & 0xFFFF;
1694 if (pipe_w < pfit_w)
1695 pipe_w = pfit_w;
1696 if (pipe_h < pfit_h)
1697 pipe_h = pfit_h;
1698
15126882
MR
1699 if (WARN_ON(!pfit_w || !pfit_h))
1700 return pixel_rate;
1701
801bcfff
PZ
1702 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1703 pfit_w * pfit_h);
1704 }
1705
1706 return pixel_rate;
1707}
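/*
 * A worked example may help (illustrative numbers, not from the source):
 * a 1920x1080 pipe downscaled by the panel fitter to 1280x720 at a
 * 148500 kHz crtc_clock yields 148500 * (1920 * 1080) / (1280 * 720)
 * = 148500 * 2.25 = 334125 kHz, i.e. downscaling raises the effective
 * pixel rate the watermark math must assume.
 */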
1708
37126462 1709/* latency must be in 0.1us units. */
ac484963 1710static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
801bcfff
PZ
1711{
1712 uint64_t ret;
1713
3312ba65
VS
1714 if (WARN(latency == 0, "Latency value missing\n"))
1715 return UINT_MAX;
1716
ac484963 1717 ret = (uint64_t) pixel_rate * cpp * latency;
801bcfff
PZ
1718 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1719
1720 return ret;
1721}
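/*
 * Worked example for method1 (illustrative numbers, not from the source):
 * pixel_rate = 148500 kHz, cpp = 4, latency = 7 (0.7us in 0.1us units):
 * 148500 * 4 * 7 = 4158000; DIV_ROUND_UP_ULL(4158000, 64 * 10000) = 7;
 * 7 + 2 = 9 blocks.
 */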
1722
37126462 1723/* latency must be in 0.1us units. */
23297044 1724static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
ac484963 1725 uint32_t horiz_pixels, uint8_t cpp,
801bcfff
PZ
1726 uint32_t latency)
1727{
1728 uint32_t ret;
1729
3312ba65
VS
1730 if (WARN(latency == 0, "Latency value missing\n"))
1731 return UINT_MAX;
15126882
MR
1732 if (WARN_ON(!pipe_htotal))
1733 return UINT_MAX;
3312ba65 1734
801bcfff 1735 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ac484963 1736 ret = (ret + 1) * horiz_pixels * cpp;
801bcfff
PZ
1737 ret = DIV_ROUND_UP(ret, 64) + 2;
1738 return ret;
1739}
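/*
 * Worked example for method2 (illustrative numbers, not from the source):
 * latency = 7 (0.7us), pixel_rate = 148500 kHz, pipe_htotal = 2200,
 * horiz_pixels = 1920, cpp = 4: (7 * 148500) / (2200 * 10000) = 0 full
 * lines, so (0 + 1) * 1920 * 4 = 7680 bytes; DIV_ROUND_UP(7680, 64) + 2
 * = 122 blocks. Callers take min(method1, method2), so the cheaper
 * estimate wins.
 */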
1740
23297044 1741static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
ac484963 1742 uint8_t cpp)
cca32e9a 1743{
15126882
MR
1744 /*
1745 * Neither of these should be possible since this function shouldn't be
1746 * called if the CRTC is off or the plane is invisible. But let's be
1747 * extra paranoid to avoid a potential divide-by-zero if we screw up
1748 * elsewhere in the driver.
1749 */
ac484963 1750 if (WARN_ON(!cpp))
15126882
MR
1751 return 0;
1752 if (WARN_ON(!horiz_pixels))
1753 return 0;
1754
ac484963 1755 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
cca32e9a
PZ
1756}
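/*
 * Worked example (illustrative numbers, not from the source):
 * pri_val = 9 blocks, horiz_pixels = 1920, cpp = 4:
 * DIV_ROUND_UP(9 * 64, 1920 * 4) + 2 = 1 + 2 = 3 FBC watermark lines.
 */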
1757
820c1980 1758struct ilk_wm_maximums {
cca32e9a
PZ
1759 uint16_t pri;
1760 uint16_t spr;
1761 uint16_t cur;
1762 uint16_t fbc;
1763};
1764
37126462
VS
1765/*
1766 * For both WM_PIPE and WM_LP.
1767 * mem_value must be in 0.1us units.
1768 */
7221fc33 1769static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
43d59eda 1770 const struct intel_plane_state *pstate,
cca32e9a
PZ
1771 uint32_t mem_value,
1772 bool is_lp)
801bcfff 1773{
ac484963
VS
1774 int cpp = pstate->base.fb ?
1775 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
cca32e9a
PZ
1776 uint32_t method1, method2;
1777
936e71e3 1778 if (!cstate->base.active || !pstate->base.visible)
801bcfff
PZ
1779 return 0;
1780
ac484963 1781 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
cca32e9a
PZ
1782
1783 if (!is_lp)
1784 return method1;
1785
7221fc33
MR
1786 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1787 cstate->base.adjusted_mode.crtc_htotal,
936e71e3 1788 drm_rect_width(&pstate->base.dst),
ac484963 1789 cpp, mem_value);
cca32e9a
PZ
1790
1791 return min(method1, method2);
801bcfff
PZ
1792}
1793
37126462
VS
1794/*
1795 * For both WM_PIPE and WM_LP.
1796 * mem_value must be in 0.1us units.
1797 */
7221fc33 1798static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
43d59eda 1799 const struct intel_plane_state *pstate,
801bcfff
PZ
1800 uint32_t mem_value)
1801{
ac484963
VS
1802 int cpp = pstate->base.fb ?
1803 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
801bcfff
PZ
1804 uint32_t method1, method2;
1805
936e71e3 1806 if (!cstate->base.active || !pstate->base.visible)
801bcfff
PZ
1807 return 0;
1808
ac484963 1809 method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
7221fc33
MR
1810 method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1811 cstate->base.adjusted_mode.crtc_htotal,
936e71e3 1812 drm_rect_width(&pstate->base.dst),
ac484963 1813 cpp, mem_value);
801bcfff
PZ
1814 return min(method1, method2);
1815}
1816
37126462
VS
1817/*
1818 * For both WM_PIPE and WM_LP.
1819 * mem_value must be in 0.1us units.
1820 */
7221fc33 1821static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
43d59eda 1822 const struct intel_plane_state *pstate,
801bcfff
PZ
1823 uint32_t mem_value)
1824{
b2435692
MR
1825 /*
1826 * We treat the cursor plane as always-on for the purposes of watermark
1827 * calculation. Until we have two-stage watermark programming merged,
1828 * this is necessary to avoid flickering.
1829 */
1830 int cpp = 4;
936e71e3 1831 int width = pstate->base.visible ? pstate->base.crtc_w : 64;
43d59eda 1832
b2435692 1833 if (!cstate->base.active)
801bcfff
PZ
1834 return 0;
1835
7221fc33
MR
1836 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1837 cstate->base.adjusted_mode.crtc_htotal,
b2435692 1838 width, cpp, mem_value);
801bcfff
PZ
1839}
1840
cca32e9a 1841/* Only for WM_LP. */
7221fc33 1842static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
43d59eda 1843 const struct intel_plane_state *pstate,
1fda9882 1844 uint32_t pri_val)
cca32e9a 1845{
ac484963
VS
1846 int cpp = pstate->base.fb ?
1847 drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
43d59eda 1848
936e71e3 1849 if (!cstate->base.active || !pstate->base.visible)
cca32e9a
PZ
1850 return 0;
1851
936e71e3 1852 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
cca32e9a
PZ
1853}
1854
158ae64f
VS
1855static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1856{
416f4727
VS
1857 if (INTEL_INFO(dev)->gen >= 8)
1858 return 3072;
1859 else if (INTEL_INFO(dev)->gen >= 7)
158ae64f
VS
1860 return 768;
1861 else
1862 return 512;
1863}
1864
4e975081
VS
1865static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1866 int level, bool is_sprite)
1867{
1868 if (INTEL_INFO(dev)->gen >= 8)
1869 /* BDW primary/sprite plane watermarks */
1870 return level == 0 ? 255 : 2047;
1871 else if (INTEL_INFO(dev)->gen >= 7)
1872 /* IVB/HSW primary/sprite plane watermarks */
1873 return level == 0 ? 127 : 1023;
1874 else if (!is_sprite)
1875 /* ILK/SNB primary plane watermarks */
1876 return level == 0 ? 127 : 511;
1877 else
1878 /* ILK/SNB sprite plane watermarks */
1879 return level == 0 ? 63 : 255;
1880}
1881
1882static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1883 int level)
1884{
1885 if (INTEL_INFO(dev)->gen >= 7)
1886 return level == 0 ? 63 : 255;
1887 else
1888 return level == 0 ? 31 : 63;
1889}
1890
1891static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1892{
1893 if (INTEL_INFO(dev)->gen >= 8)
1894 return 31;
1895 else
1896 return 15;
1897}
1898
158ae64f
VS
1899/* Calculate the maximum primary/sprite plane watermark */
1900static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1901 int level,
240264f4 1902 const struct intel_wm_config *config,
158ae64f
VS
1903 enum intel_ddb_partitioning ddb_partitioning,
1904 bool is_sprite)
1905{
1906 unsigned int fifo_size = ilk_display_fifo_size(dev);
158ae64f
VS
1907
1908 /* if sprites aren't enabled, sprites get nothing */
240264f4 1909 if (is_sprite && !config->sprites_enabled)
158ae64f
VS
1910 return 0;
1911
1912 /* HSW allows LP1+ watermarks even with multiple pipes */
240264f4 1913 if (level == 0 || config->num_pipes_active > 1) {
158ae64f
VS
1914 fifo_size /= INTEL_INFO(dev)->num_pipes;
1915
1916 /*
1917 * For some reason the non-self-refresh
1918 * FIFO size is only half of the
1919 * self-refresh FIFO size on ILK/SNB.
1920 */
1921 if (INTEL_INFO(dev)->gen <= 6)
1922 fifo_size /= 2;
1923 }
1924
240264f4 1925 if (config->sprites_enabled) {
158ae64f
VS
1926 /* level 0 is always calculated with 1:1 split */
1927 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1928 if (is_sprite)
1929 fifo_size *= 5;
1930 fifo_size /= 6;
1931 } else {
1932 fifo_size /= 2;
1933 }
1934 }
1935
1936 /* clamp to max that the registers can hold */
4e975081 1937 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
158ae64f
VS
1938}
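/*
 * Worked example (illustrative): on IVB/HSW (768 entry FIFO), an LP1+
 * level with one active pipe and sprites enabled gives the sprite
 * 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128 under 5:6
 * partitioning, versus 768 / 2 = 384 each under the default 1:2 split.
 */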
1939
1940/* Calculate the maximum cursor plane watermark */
1941static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
240264f4
VS
1942 int level,
1943 const struct intel_wm_config *config)
158ae64f
VS
1944{
1945 /* HSW LP1+ watermarks w/ multiple pipes */
240264f4 1946 if (level > 0 && config->num_pipes_active > 1)
158ae64f
VS
1947 return 64;
1948
1949 /* otherwise just report max that registers can hold */
4e975081 1950 return ilk_cursor_wm_reg_max(dev, level);
158ae64f
VS
1951}
1952
d34ff9c6 1953static void ilk_compute_wm_maximums(const struct drm_device *dev,
34982fe1
VS
1954 int level,
1955 const struct intel_wm_config *config,
1956 enum intel_ddb_partitioning ddb_partitioning,
820c1980 1957 struct ilk_wm_maximums *max)
158ae64f 1958{
240264f4
VS
1959 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1960 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1961 max->cur = ilk_cursor_wm_max(dev, level, config);
4e975081 1962 max->fbc = ilk_fbc_wm_reg_max(dev);
158ae64f
VS
1963}
1964
a3cb4048
VS
1965static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
1966 int level,
1967 struct ilk_wm_maximums *max)
1968{
1969 max->pri = ilk_plane_wm_reg_max(dev, level, false);
1970 max->spr = ilk_plane_wm_reg_max(dev, level, true);
1971 max->cur = ilk_cursor_wm_reg_max(dev, level);
1972 max->fbc = ilk_fbc_wm_reg_max(dev);
1973}
1974
d9395655 1975static bool ilk_validate_wm_level(int level,
820c1980 1976 const struct ilk_wm_maximums *max,
d9395655 1977 struct intel_wm_level *result)
a9786a11
VS
1978{
1979 bool ret;
1980
1981 /* already determined to be invalid? */
1982 if (!result->enable)
1983 return false;
1984
1985 result->enable = result->pri_val <= max->pri &&
1986 result->spr_val <= max->spr &&
1987 result->cur_val <= max->cur;
1988
1989 ret = result->enable;
1990
1991 /*
1992 * HACK until we can pre-compute everything,
1993 * and thus fail gracefully if LP0 watermarks
1994 * are exceeded...
1995 */
1996 if (level == 0 && !result->enable) {
1997 if (result->pri_val > max->pri)
1998 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
1999 level, result->pri_val, max->pri);
2000 if (result->spr_val > max->spr)
2001 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2002 level, result->spr_val, max->spr);
2003 if (result->cur_val > max->cur)
2004 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2005 level, result->cur_val, max->cur);
2006
2007 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2008 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2009 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2010 result->enable = true;
2011 }
2012
a9786a11
VS
2013 return ret;
2014}
2015
d34ff9c6 2016static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
43d59eda 2017 const struct intel_crtc *intel_crtc,
6f5ddd17 2018 int level,
7221fc33 2019 struct intel_crtc_state *cstate,
86c8bbbe
MR
2020 struct intel_plane_state *pristate,
2021 struct intel_plane_state *sprstate,
2022 struct intel_plane_state *curstate,
1fd527cc 2023 struct intel_wm_level *result)
6f5ddd17
VS
2024{
2025 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2026 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2027 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2028
2029 /* WM1+ latency values stored in 0.5us units */
2030 if (level > 0) {
2031 pri_latency *= 5;
2032 spr_latency *= 5;
2033 cur_latency *= 5;
2034 }
2035
e3bddded
ML
2036 if (pristate) {
2037 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2038 pri_latency, level);
2039 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2040 }
2041
2042 if (sprstate)
2043 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2044
2045 if (curstate)
2046 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2047
6f5ddd17
VS
2048 result->enable = true;
2049}
2050
801bcfff 2051static uint32_t
532f7a7f 2052hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
1f8eeabf 2053{
532f7a7f
VS
2054 const struct intel_atomic_state *intel_state =
2055 to_intel_atomic_state(cstate->base.state);
ee91a159
MR
2056 const struct drm_display_mode *adjusted_mode =
2057 &cstate->base.adjusted_mode;
85a02deb 2058 u32 linetime, ips_linetime;
1f8eeabf 2059
ee91a159
MR
2060 if (!cstate->base.active)
2061 return 0;
2062 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2063 return 0;
532f7a7f 2064 if (WARN_ON(intel_state->cdclk == 0))
801bcfff 2065 return 0;
1011d8c4 2066
1f8eeabf
ED
2067 /* The watermarks are computed based on how long it takes to fill a
2068 * single row at the given clock rate, multiplied by 8.
2069 */
124abe07
VS
2070 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2071 adjusted_mode->crtc_clock);
2072 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
532f7a7f 2073 intel_state->cdclk);
1f8eeabf 2074
801bcfff
PZ
2075 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2076 PIPE_WM_LINETIME_TIME(linetime);
1f8eeabf
ED
2077}
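/*
 * Worked example (illustrative numbers, not from the source):
 * crtc_htotal = 2200, crtc_clock = 148500 kHz gives linetime =
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 in 1/8 us units
 * (119 / 8 ~= 14.9us per line); with a 450000 kHz cdclk the IPS
 * linetime is DIV_ROUND_CLOSEST(17600000, 450000) = 39.
 */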
2078
2af30a5c 2079static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
12b134df 2080{
fac5e23e 2081 struct drm_i915_private *dev_priv = to_i915(dev);
12b134df 2082
2af30a5c
PB
2083 if (IS_GEN9(dev)) {
2084 uint32_t val;
4f947386 2085 int ret, i;
367294be 2086 int level, max_level = ilk_wm_max_level(dev);
2af30a5c
PB
2087
2088 /* read the first set of memory latencies[0:3] */
2089 val = 0; /* data0 to be programmed to 0 for first set */
2090 mutex_lock(&dev_priv->rps.hw_lock);
2091 ret = sandybridge_pcode_read(dev_priv,
2092 GEN9_PCODE_READ_MEM_LATENCY,
2093 &val);
2094 mutex_unlock(&dev_priv->rps.hw_lock);
2095
2096 if (ret) {
2097 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2098 return;
2099 }
2100
2101 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2102 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2103 GEN9_MEM_LATENCY_LEVEL_MASK;
2104 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2105 GEN9_MEM_LATENCY_LEVEL_MASK;
2106 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2107 GEN9_MEM_LATENCY_LEVEL_MASK;
2108
2109 /* read the second set of memory latencies[4:7] */
2110 val = 1; /* data0 to be programmed to 1 for second set */
2111 mutex_lock(&dev_priv->rps.hw_lock);
2112 ret = sandybridge_pcode_read(dev_priv,
2113 GEN9_PCODE_READ_MEM_LATENCY,
2114 &val);
2115 mutex_unlock(&dev_priv->rps.hw_lock);
2116 if (ret) {
2117 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2118 return;
2119 }
2120
2121 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2122 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2123 GEN9_MEM_LATENCY_LEVEL_MASK;
2124 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2125 GEN9_MEM_LATENCY_LEVEL_MASK;
2126 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2127 GEN9_MEM_LATENCY_LEVEL_MASK;
2128
367294be 2129 /*
6f97235b
DL
2130 * WaWmMemoryReadLatency:skl
2131 *
367294be
VK
2132 * punit doesn't take into account the read latency so we need
2133 * to add 2us to the various latency levels we retrieve from
2134 * the punit.
2135 * - W0 is a bit special in that it's the only level that
2136 * can't be disabled if we want to have display working, so
2137 * we always add 2us there.
2138 * - For levels >=1, punit returns 0us latency when they are
2139 * disabled, so we respect that and don't add 2us then
4f947386
VK
2140 *
2141 * Additionally, if a level n (n > 1) has a 0us latency, all
2142 * levels m (m >= n) need to be disabled. We make sure to
2143 * sanitize the values out of the punit to satisfy this
2144 * requirement.
367294be
VK
2145 */
2146 wm[0] += 2;
2147 for (level = 1; level <= max_level; level++)
2148 if (wm[level] != 0)
2149 wm[level] += 2;
4f947386
VK
2150 else {
2151 for (i = level + 1; i <= max_level; i++)
2152 wm[i] = 0;
367294be 2153
4f947386
VK
2154 break;
2155 }
2af30a5c 2156 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12b134df
VS
2157 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2158
2159 wm[0] = (sskpd >> 56) & 0xFF;
2160 if (wm[0] == 0)
2161 wm[0] = sskpd & 0xF;
e5d5019e
VS
2162 wm[1] = (sskpd >> 4) & 0xFF;
2163 wm[2] = (sskpd >> 12) & 0xFF;
2164 wm[3] = (sskpd >> 20) & 0x1FF;
2165 wm[4] = (sskpd >> 32) & 0x1FF;
63cf9a13
VS
2166 } else if (INTEL_INFO(dev)->gen >= 6) {
2167 uint32_t sskpd = I915_READ(MCH_SSKPD);
2168
2169 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2170 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2171 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2172 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
3a88d0ac
VS
2173 } else if (INTEL_INFO(dev)->gen >= 5) {
2174 uint32_t mltr = I915_READ(MLTR_ILK);
2175
2176 /* ILK primary LP0 latency is 700 ns */
2177 wm[0] = 7;
2178 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2179 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
12b134df
VS
2180 }
2181}
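/*
 * Worked example of the gen9 sanitization above (illustrative raw
 * values): punit latencies {4, 10, 0, 35, ...} become {6, 12, 0, 0, ...}:
 * 2us is added to WM0 and WM1, while the 0us at level 2 disables that
 * level and every level above it regardless of the raw level-3 value.
 */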
2182
53615a5e
VS
2183static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2184{
2185 /* ILK sprite LP0 latency is 1300 ns */
7e22dbbb 2186 if (IS_GEN5(dev))
53615a5e
VS
2187 wm[0] = 13;
2188}
2189
2190static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2191{
2192 /* ILK cursor LP0 latency is 1300 ns */
7e22dbbb 2193 if (IS_GEN5(dev))
53615a5e
VS
2194 wm[0] = 13;
2195
2196 /* WaDoubleCursorLP3Latency:ivb */
2197 if (IS_IVYBRIDGE(dev))
2198 wm[3] *= 2;
2199}
2200
546c81fd 2201int ilk_wm_max_level(const struct drm_device *dev)
26ec971e 2202{
26ec971e 2203 /* how many WM levels are we expecting */
b6e742f6 2204 if (INTEL_INFO(dev)->gen >= 9)
2af30a5c
PB
2205 return 7;
2206 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ad0d6dc4 2207 return 4;
26ec971e 2208 else if (INTEL_INFO(dev)->gen >= 6)
ad0d6dc4 2209 return 3;
26ec971e 2210 else
ad0d6dc4
VS
2211 return 2;
2212}
7526ed79 2213
ad0d6dc4
VS
2214static void intel_print_wm_latency(struct drm_device *dev,
2215 const char *name,
2af30a5c 2216 const uint16_t wm[8])
ad0d6dc4
VS
2217{
2218 int level, max_level = ilk_wm_max_level(dev);
26ec971e
VS
2219
2220 for (level = 0; level <= max_level; level++) {
2221 unsigned int latency = wm[level];
2222
2223 if (latency == 0) {
2224 DRM_ERROR("%s WM%d latency not provided\n",
2225 name, level);
2226 continue;
2227 }
2228
2af30a5c
PB
2229 /*
2230 * - latencies are in us on gen9.
2231 * - before then, WM1+ latency values are in 0.5us units
2232 */
2233 if (IS_GEN9(dev))
2234 latency *= 10;
2235 else if (level > 0)
26ec971e
VS
2236 latency *= 5;
2237
2238 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2239 name, level, wm[level],
2240 latency / 10, latency % 10);
2241 }
2242}
2243
e95a2f75
VS
2244static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2245 uint16_t wm[5], uint16_t min)
2246{
91c8a326 2247 int level, max_level = ilk_wm_max_level(&dev_priv->drm);
e95a2f75
VS
2248
2249 if (wm[0] >= min)
2250 return false;
2251
2252 wm[0] = max(wm[0], min);
2253 for (level = 1; level <= max_level; level++)
2254 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2255
2256 return true;
2257}
2258
2259static void snb_wm_latency_quirk(struct drm_device *dev)
2260{
fac5e23e 2261 struct drm_i915_private *dev_priv = to_i915(dev);
e95a2f75
VS
2262 bool changed;
2263
2264 /*
2265 * The BIOS-provided WM memory latency values are often
2266 * inadequate for high resolution displays. Adjust them.
2267 */
2268 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2269 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2270 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2271
2272 if (!changed)
2273 return;
2274
2275 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2276 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2277 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2278 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2279}
2280
fa50ad61 2281static void ilk_setup_wm_latency(struct drm_device *dev)
53615a5e 2282{
fac5e23e 2283 struct drm_i915_private *dev_priv = to_i915(dev);
53615a5e
VS
2284
2285 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2286
2287 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2288 sizeof(dev_priv->wm.pri_latency));
2289 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2290 sizeof(dev_priv->wm.pri_latency));
2291
2292 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2293 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
26ec971e
VS
2294
2295 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2296 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2297 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
e95a2f75
VS
2298
2299 if (IS_GEN6(dev))
2300 snb_wm_latency_quirk(dev);
53615a5e
VS
2301}
2302
2af30a5c
PB
2303static void skl_setup_wm_latency(struct drm_device *dev)
2304{
fac5e23e 2305 struct drm_i915_private *dev_priv = to_i915(dev);
2af30a5c
PB
2306
2307 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2308 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2309}
2310
ed4a6a7c
MR
2311static bool ilk_validate_pipe_wm(struct drm_device *dev,
2312 struct intel_pipe_wm *pipe_wm)
2313{
2314 /* LP0 watermark maximums depend on this pipe alone */
2315 const struct intel_wm_config config = {
2316 .num_pipes_active = 1,
2317 .sprites_enabled = pipe_wm->sprites_enabled,
2318 .sprites_scaled = pipe_wm->sprites_scaled,
2319 };
2320 struct ilk_wm_maximums max;
2321
2322 /* LP0 watermarks always use 1/2 DDB partitioning */
2323 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2324
2325 /* At least LP0 must be valid */
2326 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
2327 DRM_DEBUG_KMS("LP0 watermark invalid\n");
2328 return false;
2329 }
2330
2331 return true;
2332}
2333
0b2ae6d7 2334/* Compute new watermarks for the pipe */
e3bddded 2335static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
0b2ae6d7 2336{
e3bddded
ML
2337 struct drm_atomic_state *state = cstate->base.state;
2338 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
86c8bbbe 2339 struct intel_pipe_wm *pipe_wm;
e3bddded 2340 struct drm_device *dev = state->dev;
fac5e23e 2341 const struct drm_i915_private *dev_priv = to_i915(dev);
43d59eda 2342 struct intel_plane *intel_plane;
86c8bbbe 2343 struct intel_plane_state *pristate = NULL;
43d59eda 2344 struct intel_plane_state *sprstate = NULL;
86c8bbbe 2345 struct intel_plane_state *curstate = NULL;
d81f04c5 2346 int level, max_level = ilk_wm_max_level(dev), usable_level;
820c1980 2347 struct ilk_wm_maximums max;
0b2ae6d7 2348
e8f1f02e 2349 pipe_wm = &cstate->wm.ilk.optimal;
86c8bbbe 2350
43d59eda 2351 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
e3bddded
ML
2352 struct intel_plane_state *ps;
2353
2354 ps = intel_atomic_get_existing_plane_state(state,
2355 intel_plane);
2356 if (!ps)
2357 continue;
86c8bbbe
MR
2358
2359 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
e3bddded 2360 pristate = ps;
86c8bbbe 2361 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
e3bddded 2362 sprstate = ps;
86c8bbbe 2363 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
e3bddded 2364 curstate = ps;
43d59eda
MR
2365 }
2366
ed4a6a7c 2367 pipe_wm->pipe_enabled = cstate->base.active;
e3bddded 2368 if (sprstate) {
936e71e3
VS
2369 pipe_wm->sprites_enabled = sprstate->base.visible;
2370 pipe_wm->sprites_scaled = sprstate->base.visible &&
2371 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
2372 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
e3bddded
ML
2373 }
2374
d81f04c5
ML
2375 usable_level = max_level;
2376
7b39a0b7 2377 /* ILK/SNB: LP2+ watermarks only w/o sprites */
e3bddded 2378 if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
d81f04c5 2379 usable_level = 1;
7b39a0b7
VS
2380
2381 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
ed4a6a7c 2382 if (pipe_wm->sprites_scaled)
d81f04c5 2383 usable_level = 0;
7b39a0b7 2384
86c8bbbe 2385 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
71f0a626
ML
2386 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2387
2388 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2389 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
0b2ae6d7 2390
a42a5719 2391 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
532f7a7f 2392 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
0b2ae6d7 2393
ed4a6a7c 2394 if (!ilk_validate_pipe_wm(dev, pipe_wm))
1a426d61 2395 return -EINVAL;
a3cb4048
VS
2396
2397 ilk_compute_wm_reg_maximums(dev, 1, &max);
2398
2399 for (level = 1; level <= max_level; level++) {
71f0a626 2400 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
a3cb4048 2401
86c8bbbe 2402 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
d81f04c5 2403 pristate, sprstate, curstate, wm);
a3cb4048
VS
2404
2405 /*
2406 * Disable any watermark level that exceeds the
2407 * register maximums since such watermarks are
2408 * always invalid.
2409 */
71f0a626
ML
2410 if (level > usable_level)
2411 continue;
2412
2413 if (ilk_validate_wm_level(level, &max, wm))
2414 pipe_wm->wm[level] = *wm;
2415 else
d81f04c5 2416 usable_level = level;
a3cb4048
VS
2417 }
2418
86c8bbbe 2419 return 0;
0b2ae6d7
VS
2420}
2421
ed4a6a7c
MR
2422/*
2423 * Build a set of 'intermediate' watermark values that satisfy both the old
2424 * state and the new state. These can be programmed to the hardware
2425 * immediately.
2426 */
2427static int ilk_compute_intermediate_wm(struct drm_device *dev,
2428 struct intel_crtc *intel_crtc,
2429 struct intel_crtc_state *newstate)
2430{
e8f1f02e 2431 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
ed4a6a7c
MR
2432 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2433 int level, max_level = ilk_wm_max_level(dev);
2434
2435 /*
2436 * Start with the final, target watermarks, then combine with the
2437 * currently active watermarks to get values that are safe both before
2438 * and after the vblank.
2439 */
e8f1f02e 2440 *a = newstate->wm.ilk.optimal;
ed4a6a7c
MR
2441 a->pipe_enabled |= b->pipe_enabled;
2442 a->sprites_enabled |= b->sprites_enabled;
2443 a->sprites_scaled |= b->sprites_scaled;
2444
2445 for (level = 0; level <= max_level; level++) {
2446 struct intel_wm_level *a_wm = &a->wm[level];
2447 const struct intel_wm_level *b_wm = &b->wm[level];
2448
2449 a_wm->enable &= b_wm->enable;
2450 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2451 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2452 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2453 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2454 }
2455
2456 /*
2457 * We need to make sure that these merged watermark values are
2458 * actually a valid configuration themselves. If they're not,
2459 * there's no safe way to transition from the old state to
2460 * the new state, so we need to fail the atomic transaction.
2461 */
2462 if (!ilk_validate_pipe_wm(dev, a))
2463 return -EINVAL;
2464
2465 /*
2466 * If our intermediate WM are identical to the final WM, then we can
2467 * omit the post-vblank programming; only update if it's different.
2468 */
e8f1f02e 2469 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
ed4a6a7c
MR
2470 newstate->wm.need_postvbl_update = false;
2471
2472 return 0;
2473}
2474
0b2ae6d7
VS
2475/*
2476 * Merge the watermarks from all active pipes for a specific level.
2477 */
2478static void ilk_merge_wm_level(struct drm_device *dev,
2479 int level,
2480 struct intel_wm_level *ret_wm)
2481{
2482 const struct intel_crtc *intel_crtc;
2483
d52fea5b
VS
2484 ret_wm->enable = true;
2485
d3fcc808 2486 for_each_intel_crtc(dev, intel_crtc) {
ed4a6a7c 2487 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
fe392efd
VS
2488 const struct intel_wm_level *wm = &active->wm[level];
2489
2490 if (!active->pipe_enabled)
2491 continue;
0b2ae6d7 2492
d52fea5b
VS
2493 /*
2494 * The watermark values may have been used in the past,
2495 * so we must maintain them in the registers for some
2496 * time even if the level is now disabled.
2497 */
0b2ae6d7 2498 if (!wm->enable)
d52fea5b 2499 ret_wm->enable = false;
0b2ae6d7
VS
2500
2501 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2502 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2503 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2504 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2505 }
0b2ae6d7
VS
2506}
2507
2508/*
2509 * Merge all low power watermarks for all active pipes.
2510 */
2511static void ilk_wm_merge(struct drm_device *dev,
0ba22e26 2512 const struct intel_wm_config *config,
820c1980 2513 const struct ilk_wm_maximums *max,
0b2ae6d7
VS
2514 struct intel_pipe_wm *merged)
2515{
fac5e23e 2516 struct drm_i915_private *dev_priv = to_i915(dev);
0b2ae6d7 2517 int level, max_level = ilk_wm_max_level(dev);
d52fea5b 2518 int last_enabled_level = max_level;
0b2ae6d7 2519
0ba22e26
VS
2520 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2521 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2522 config->num_pipes_active > 1)
1204d5ba 2523 last_enabled_level = 0;
0ba22e26 2524
6c8b6c28
VS
2525 /* ILK: FBC WM must be disabled always */
2526 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
0b2ae6d7
VS
2527
2528 /* merge each WM1+ level */
2529 for (level = 1; level <= max_level; level++) {
2530 struct intel_wm_level *wm = &merged->wm[level];
2531
2532 ilk_merge_wm_level(dev, level, wm);
2533
d52fea5b
VS
2534 if (level > last_enabled_level)
2535 wm->enable = false;
2536 else if (!ilk_validate_wm_level(level, max, wm))
2537 /* make sure all following levels get disabled */
2538 last_enabled_level = level - 1;
0b2ae6d7
VS
2539
2540 /*
2541 * The spec says it is preferred to disable
2542 * FBC WMs instead of disabling a WM level.
2543 */
2544 if (wm->fbc_val > max->fbc) {
d52fea5b
VS
2545 if (wm->enable)
2546 merged->fbc_wm_enabled = false;
0b2ae6d7
VS
2547 wm->fbc_val = 0;
2548 }
2549 }
6c8b6c28
VS
2550
2551 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2552 /*
2553 * FIXME this is racy. FBC might get enabled later.
2554 * What we should check here is whether FBC can be
2555 * enabled sometime later.
2556 */
7733b49b 2557 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
0e631adc 2558 intel_fbc_is_active(dev_priv)) {
6c8b6c28
VS
2559 for (level = 2; level <= max_level; level++) {
2560 struct intel_wm_level *wm = &merged->wm[level];
2561
2562 wm->enable = false;
2563 }
2564 }
0b2ae6d7
VS
2565}
2566
b380ca3c
VS
2567static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2568{
2569 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2570 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2571}
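/*
 * Worked example (illustrative): on HSW/BDW (levels 0-4), LP1/LP2/LP3
 * map to levels 1/3/4 when the level-4 watermark is enabled, and to
 * levels 1/2/3 otherwise.
 */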
2572
a68d68ee
VS
2573/* The value we need to program into the WM_LPx latency field */
2574static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2575{
fac5e23e 2576 struct drm_i915_private *dev_priv = to_i915(dev);
a68d68ee 2577
a42a5719 2578 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
a68d68ee
VS
2579 return 2 * level;
2580 else
2581 return dev_priv->wm.pri_latency[level];
2582}
2583
820c1980 2584static void ilk_compute_wm_results(struct drm_device *dev,
0362c781 2585 const struct intel_pipe_wm *merged,
609cedef 2586 enum intel_ddb_partitioning partitioning,
820c1980 2587 struct ilk_wm_values *results)
801bcfff 2588{
0b2ae6d7
VS
2589 struct intel_crtc *intel_crtc;
2590 int level, wm_lp;
cca32e9a 2591
0362c781 2592 results->enable_fbc_wm = merged->fbc_wm_enabled;
609cedef 2593 results->partitioning = partitioning;
cca32e9a 2594
0b2ae6d7 2595 /* LP1+ register values */
cca32e9a 2596 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
1fd527cc 2597 const struct intel_wm_level *r;
801bcfff 2598
b380ca3c 2599 level = ilk_wm_lp_to_level(wm_lp, merged);
0b2ae6d7 2600
0362c781 2601 r = &merged->wm[level];
cca32e9a 2602
d52fea5b
VS
2603 /*
2604 * Maintain the watermark values even if the level is
2605 * disabled. Doing otherwise could cause underruns.
2606 */
2607 results->wm_lp[wm_lp - 1] =
a68d68ee 2608 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
416f4727
VS
2609 (r->pri_val << WM1_LP_SR_SHIFT) |
2610 r->cur_val;
2611
d52fea5b
VS
2612 if (r->enable)
2613 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2614
416f4727
VS
2615 if (INTEL_INFO(dev)->gen >= 8)
2616 results->wm_lp[wm_lp - 1] |=
2617 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2618 else
2619 results->wm_lp[wm_lp - 1] |=
2620 r->fbc_val << WM1_LP_FBC_SHIFT;
2621
d52fea5b
VS
2622 /*
2623 * Always set WM1S_LP_EN when spr_val != 0, even if the
2624 * level is disabled. Doing otherwise could cause underruns.
2625 */
6cef2b8a
VS
2626 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2627 WARN_ON(wm_lp != 1);
2628 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2629 } else
2630 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
cca32e9a 2631 }
801bcfff 2632
0b2ae6d7 2633 /* LP0 register values */
d3fcc808 2634 for_each_intel_crtc(dev, intel_crtc) {
0b2ae6d7 2635 enum pipe pipe = intel_crtc->pipe;
ed4a6a7c
MR
2636 const struct intel_wm_level *r =
2637 &intel_crtc->wm.active.ilk.wm[0];
0b2ae6d7
VS
2638
2639 if (WARN_ON(!r->enable))
2640 continue;
2641
ed4a6a7c 2642 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
1011d8c4 2643
0b2ae6d7
VS
2644 results->wm_pipe[pipe] =
2645 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2646 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2647 r->cur_val;
801bcfff
PZ
2648 }
2649}
2650
861f3389
PZ
2651/* Find the result with the highest level enabled. Check enable_fbc_wm in
2652 * case both are at the same level; prefer r1 if they're otherwise equal. */
820c1980 2653static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
198a1e9b
VS
2654 struct intel_pipe_wm *r1,
2655 struct intel_pipe_wm *r2)
861f3389 2656{
198a1e9b
VS
2657 int level, max_level = ilk_wm_max_level(dev);
2658 int level1 = 0, level2 = 0;
861f3389 2659
198a1e9b
VS
2660 for (level = 1; level <= max_level; level++) {
2661 if (r1->wm[level].enable)
2662 level1 = level;
2663 if (r2->wm[level].enable)
2664 level2 = level;
861f3389
PZ
2665 }
2666
198a1e9b
VS
2667 if (level1 == level2) {
2668 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
861f3389
PZ
2669 return r2;
2670 else
2671 return r1;
198a1e9b 2672 } else if (level1 > level2) {
861f3389
PZ
2673 return r1;
2674 } else {
2675 return r2;
2676 }
2677}
2678
49a687c4
VS
2679/* dirty bits used to track which watermarks need changes */
2680#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2681#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2682#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2683#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2684#define WM_DIRTY_FBC (1 << 24)
2685#define WM_DIRTY_DDB (1 << 25)
2686
055e393f 2687static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
820c1980
ID
2688 const struct ilk_wm_values *old,
2689 const struct ilk_wm_values *new)
49a687c4
VS
2690{
2691 unsigned int dirty = 0;
2692 enum pipe pipe;
2693 int wm_lp;
2694
055e393f 2695 for_each_pipe(dev_priv, pipe) {
49a687c4
VS
2696 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2697 dirty |= WM_DIRTY_LINETIME(pipe);
2698 /* Must disable LP1+ watermarks too */
2699 dirty |= WM_DIRTY_LP_ALL;
2700 }
2701
2702 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2703 dirty |= WM_DIRTY_PIPE(pipe);
2704 /* Must disable LP1+ watermarks too */
2705 dirty |= WM_DIRTY_LP_ALL;
2706 }
2707 }
2708
2709 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2710 dirty |= WM_DIRTY_FBC;
2711 /* Must disable LP1+ watermarks too */
2712 dirty |= WM_DIRTY_LP_ALL;
2713 }
2714
2715 if (old->partitioning != new->partitioning) {
2716 dirty |= WM_DIRTY_DDB;
2717 /* Must disable LP1+ watermarks too */
2718 dirty |= WM_DIRTY_LP_ALL;
2719 }
2720
2721 /* LP1+ watermarks already deemed dirty, no need to continue */
2722 if (dirty & WM_DIRTY_LP_ALL)
2723 return dirty;
2724
2725 /* Find the lowest numbered LP1+ watermark in need of an update... */
2726 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2727 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2728 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2729 break;
2730 }
2731
2732 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2733 for (; wm_lp <= 3; wm_lp++)
2734 dirty |= WM_DIRTY_LP(wm_lp);
2735
2736 return dirty;
2737}
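/*
 * Worked example (illustrative): if only pipe B's WM0 register value
 * changed, dirty = WM_DIRTY_PIPE(PIPE_B) | WM_DIRTY_LP_ALL, since any
 * pipe change also forces the LP1+ watermarks to be rewritten.
 */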
2738
8553c18e
VS
2739static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2740 unsigned int dirty)
801bcfff 2741{
820c1980 2742 struct ilk_wm_values *previous = &dev_priv->wm.hw;
8553c18e 2743 bool changed = false;
801bcfff 2744
facd619b
VS
2745 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2746 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2747 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
8553c18e 2748 changed = true;
facd619b
VS
2749 }
2750 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2751 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2752 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
8553c18e 2753 changed = true;
facd619b
VS
2754 }
2755 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2756 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2757 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
8553c18e 2758 changed = true;
facd619b 2759 }
801bcfff 2760
facd619b
VS
2761 /*
2762 * Don't touch WM1S_LP_EN here.
2763 * Doing so could cause underruns.
2764 */
6cef2b8a 2765
8553c18e
VS
2766 return changed;
2767}
2768
2769/*
2770 * The spec says we shouldn't write when we don't need to, because every write
2771 * causes WMs to be re-evaluated, expending some power.
2772 */
820c1980
ID
2773static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2774 struct ilk_wm_values *results)
8553c18e 2775{
91c8a326 2776 struct drm_device *dev = &dev_priv->drm;
820c1980 2777 struct ilk_wm_values *previous = &dev_priv->wm.hw;
8553c18e
VS
2778 unsigned int dirty;
2779 uint32_t val;
2780
055e393f 2781 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
8553c18e
VS
2782 if (!dirty)
2783 return;
2784
2785 _ilk_disable_lp_wm(dev_priv, dirty);
2786
49a687c4 2787 if (dirty & WM_DIRTY_PIPE(PIPE_A))
801bcfff 2788 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
49a687c4 2789 if (dirty & WM_DIRTY_PIPE(PIPE_B))
801bcfff 2790 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
49a687c4 2791 if (dirty & WM_DIRTY_PIPE(PIPE_C))
801bcfff
PZ
2792 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2793
49a687c4 2794 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
801bcfff 2795 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
49a687c4 2796 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
801bcfff 2797 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
49a687c4 2798 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
801bcfff
PZ
2799 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2800
49a687c4 2801 if (dirty & WM_DIRTY_DDB) {
a42a5719 2802 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ac9545fd
VS
2803 val = I915_READ(WM_MISC);
2804 if (results->partitioning == INTEL_DDB_PART_1_2)
2805 val &= ~WM_MISC_DATA_PARTITION_5_6;
2806 else
2807 val |= WM_MISC_DATA_PARTITION_5_6;
2808 I915_WRITE(WM_MISC, val);
2809 } else {
2810 val = I915_READ(DISP_ARB_CTL2);
2811 if (results->partitioning == INTEL_DDB_PART_1_2)
2812 val &= ~DISP_DATA_PARTITION_5_6;
2813 else
2814 val |= DISP_DATA_PARTITION_5_6;
2815 I915_WRITE(DISP_ARB_CTL2, val);
2816 }
1011d8c4
PZ
2817 }
2818
49a687c4 2819 if (dirty & WM_DIRTY_FBC) {
cca32e9a
PZ
2820 val = I915_READ(DISP_ARB_CTL);
2821 if (results->enable_fbc_wm)
2822 val &= ~DISP_FBC_WM_DIS;
2823 else
2824 val |= DISP_FBC_WM_DIS;
2825 I915_WRITE(DISP_ARB_CTL, val);
2826 }
2827
954911eb
ID
2828 if (dirty & WM_DIRTY_LP(1) &&
2829 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2830 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2831
2832 if (INTEL_INFO(dev)->gen >= 7) {
6cef2b8a
VS
2833 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2834 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2835 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2836 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2837 }
801bcfff 2838
facd619b 2839 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
801bcfff 2840 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
facd619b 2841 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
801bcfff 2842 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
facd619b 2843 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
801bcfff 2844 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
609cedef
VS
2845
2846 dev_priv->wm.hw = *results;
801bcfff
PZ
2847}
2848
ed4a6a7c 2849bool ilk_disable_lp_wm(struct drm_device *dev)
8553c18e 2850{
fac5e23e 2851 struct drm_i915_private *dev_priv = to_i915(dev);
8553c18e
VS
2852
2853 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2854}
2855
656d1b89 2856#define SKL_SAGV_BLOCK_TIME 30 /* µs */
b9cec075 2857
024c9045
MR
2858/*
2859 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2860 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2861 * other universal planes are in indices 1..n. Note that this may leave unused
2862 * indices between the top "sprite" plane and the cursor.
2863 */
2864static int
2865skl_wm_plane_id(const struct intel_plane *plane)
2866{
2867 switch (plane->base.type) {
2868 case DRM_PLANE_TYPE_PRIMARY:
2869 return 0;
2870 case DRM_PLANE_TYPE_CURSOR:
2871 return PLANE_CURSOR;
2872 case DRM_PLANE_TYPE_OVERLAY:
2873 return plane->plane + 1;
2874 default:
2875 MISSING_CASE(plane->base.type);
2876 return plane->plane;
2877 }
2878}
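/*
 * Worked example (illustrative): a pipe with a primary plane and two
 * sprites maps to indices 0 (primary), 1 and 2 (sprites with
 * plane->plane 0 and 1), with the cursor fixed at PLANE_CURSOR.
 */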
2879
656d1b89
L
2880/*
2881 * SAGV dynamically adjusts the system agent voltage and clock frequencies
2882 * depending on power and performance requirements. The display engine access
2883 * to system memory is blocked during the adjustment time. Because of the
2884 * blocking time, having this enabled can cause full system hangs and/or pipe
2885 * underruns if we don't meet all of the following requirements:
2886 *
2887 * - <= 1 pipe enabled
2888 * - All planes can enable watermarks for latencies >= SAGV engine block time
2889 * - We're not using an interlaced display configuration
2890 */
2891int
16dcdc4e 2892intel_enable_sagv(struct drm_i915_private *dev_priv)
656d1b89
L
2893{
2894 int ret;
2895
16dcdc4e
PZ
2896 if (dev_priv->sagv_status == I915_SAGV_NOT_CONTROLLED ||
2897 dev_priv->sagv_status == I915_SAGV_ENABLED)
656d1b89
L
2898 return 0;
2899
2900 DRM_DEBUG_KMS("Enabling the SAGV\n");
2901 mutex_lock(&dev_priv->rps.hw_lock);
2902
2903 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2904 GEN9_SAGV_ENABLE);
2905
2906 /* We don't need to wait for the SAGV when enabling */
2907 mutex_unlock(&dev_priv->rps.hw_lock);
2908
2909 /*
2910 * Some skl systems, pre-release machines in particular,
2911 * don't actually have an SAGV.
2912 */
2913 if (ret == -ENXIO) {
2914 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
16dcdc4e 2915 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
656d1b89
L
2916 return 0;
2917 } else if (ret < 0) {
2918 DRM_ERROR("Failed to enable the SAGV\n");
2919 return ret;
2920 }
2921
16dcdc4e 2922 dev_priv->sagv_status = I915_SAGV_ENABLED;
656d1b89
L
2923 return 0;
2924}
2925
2926static int
16dcdc4e 2927intel_do_sagv_disable(struct drm_i915_private *dev_priv)
656d1b89
L
2928{
2929 int ret;
2930 uint32_t temp = GEN9_SAGV_DISABLE;
2931
2932 ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2933 &temp);
2934 if (ret)
2935 return ret;
2936 else
2937 return temp & GEN9_SAGV_IS_DISABLED;
2938}
2939
2940int
16dcdc4e 2941intel_disable_sagv(struct drm_i915_private *dev_priv)
656d1b89
L
2942{
2943 int ret, result;
2944
16dcdc4e
PZ
2945 if (dev_priv->sagv_status == I915_SAGV_NOT_CONTROLLED ||
2946 dev_priv->sagv_status == I915_SAGV_DISABLED)
656d1b89
L
2947 return 0;
2948
2949 DRM_DEBUG_KMS("Disabling the SAGV\n");
2950 mutex_lock(&dev_priv->rps.hw_lock);
2951
2952 /* bspec says to keep retrying for at least 1 ms */
16dcdc4e 2953 ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
656d1b89
L
2954 mutex_unlock(&dev_priv->rps.hw_lock);
2955
2956 if (ret == -ETIMEDOUT) {
2957 DRM_ERROR("Request to disable SAGV timed out\n");
2958 return -ETIMEDOUT;
2959 }
2960
2961 /*
2962 * Some skl systems, pre-release machines in particular,
2963 * don't actually have an SAGV.
2964 */
2965 if (result == -ENXIO) {
2966 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
16dcdc4e 2967 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
656d1b89
L
2968 return 0;
2969 } else if (result < 0) {
2970 DRM_ERROR("Failed to disable the SAGV\n");
2971 return result;
2972 }
2973
16dcdc4e 2974 dev_priv->sagv_status = I915_SAGV_DISABLED;
656d1b89
L
2975 return 0;
2976}
2977
16dcdc4e 2978bool intel_can_enable_sagv(struct drm_atomic_state *state)
656d1b89
L
2979{
2980 struct drm_device *dev = state->dev;
2981 struct drm_i915_private *dev_priv = to_i915(dev);
2982 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2983 struct drm_crtc *crtc;
2984 enum pipe pipe;
2985 int level, plane;
2986
2987 /*
2988 * SKL workaround: bspec recommends we disable the SAGV when we have
2989 * more than one pipe enabled.
2990 *
2991 * If there are no active CRTCs, no additional checks need to be performed.
2992 */
2993 if (hweight32(intel_state->active_crtcs) == 0)
2994 return true;
2995 else if (hweight32(intel_state->active_crtcs) > 1)
2996 return false;
2997
2998 /* Since we're now guaranteed to only have one active CRTC... */
2999 pipe = ffs(intel_state->active_crtcs) - 1;
3000 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
3001
3002 if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
3003 return false;
3004
3005 for_each_plane(dev_priv, pipe, plane) {
3006 /* Skip this plane if it's not enabled */
3007 if (intel_state->wm_results.plane[pipe][plane][0] == 0)
3008 continue;
3009
3010 /* Find the highest enabled wm level for this plane */
3011 for (level = ilk_wm_max_level(dev);
3012 intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
3013 { }
3014
3015 /*
3016 * If any of the planes on this pipe don't enable wm levels that
3017 * incur memory latencies higher than 30µs, we can't enable
3018 * the SAGV.
3019 */
3020 if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
3021 return false;
3022 }
3023
3024 return true;
3025}
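/*
 * Worked example (illustrative latencies): with skl_latency =
 * {2, 7, 20, 35, ...} us, a plane whose highest enabled level is WM2
 * only tolerates 20us of blocking, below SKL_SAGV_BLOCK_TIME (30us),
 * so the SAGV must stay off; a plane reaching WM3 (35us) would allow it.
 */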
3026
b9cec075
DL
3027static void
3028skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
024c9045 3029 const struct intel_crtc_state *cstate,
c107acfe
MR
3030 struct skl_ddb_entry *alloc, /* out */
3031 int *num_active /* out */)
b9cec075 3032{
c107acfe
MR
3033 struct drm_atomic_state *state = cstate->base.state;
3034 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3035 struct drm_i915_private *dev_priv = to_i915(dev);
024c9045 3036 struct drm_crtc *for_crtc = cstate->base.crtc;
b9cec075
DL
3037 unsigned int pipe_size, ddb_size;
3038 int nth_active_pipe;
c107acfe
MR
3039 int pipe = to_intel_crtc(for_crtc)->pipe;
3040
a6d3460e 3041 if (WARN_ON(!state) || !cstate->base.active) {
b9cec075
DL
3042 alloc->start = 0;
3043 alloc->end = 0;
a6d3460e 3044 *num_active = hweight32(dev_priv->active_crtcs);
b9cec075
DL
3045 return;
3046 }
3047
a6d3460e
MR
3048 if (intel_state->active_pipe_changes)
3049 *num_active = hweight32(intel_state->active_crtcs);
3050 else
3051 *num_active = hweight32(dev_priv->active_crtcs);
3052
6f3fff60
D
3053 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3054 WARN_ON(ddb_size == 0);
b9cec075
DL
3055
3056 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3057
c107acfe 3058 /*
a6d3460e
MR
3059 * If the state doesn't change the active CRTCs, then there's
3060 * no need to recalculate; the existing pipe allocation limits
3061 * should remain unchanged. Note that we're safe from racing
3062 * commits since any racing commit that changes the active CRTC
3063 * list would need to grab _all_ crtc locks, including the one
3064 * we currently hold.
c107acfe 3065 */
a6d3460e
MR
3066 if (!intel_state->active_pipe_changes) {
3067 *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
3068 return;
c107acfe 3069 }
a6d3460e
MR
3070
3071 nth_active_pipe = hweight32(intel_state->active_crtcs &
3072 (drm_crtc_mask(for_crtc) - 1));
3073 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3074 alloc->start = nth_active_pipe * ddb_size / *num_active;
3075 alloc->end = alloc->start + pipe_size;
b9cec075
DL
3076}
3077
c107acfe 3078static unsigned int skl_cursor_allocation(int num_active)
b9cec075 3079{
c107acfe 3080 if (num_active == 1)
b9cec075
DL
3081 return 32;
3082
3083 return 8;
3084}
3085
a269c583
DL
3086static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3087{
3088 entry->start = reg & 0x3ff;
3089 entry->end = (reg >> 16) & 0x3ff;
16160e3d
DL
3090 if (entry->end)
3091 entry->end += 1;
a269c583
DL
3092}
3093
08db6652
DL
3094void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3095 struct skl_ddb_allocation *ddb /* out */)
a269c583 3096{
a269c583
DL
3097 enum pipe pipe;
3098 int plane;
3099 u32 val;
3100
b10f1b20
ML
3101 memset(ddb, 0, sizeof(*ddb));
3102
a269c583 3103 for_each_pipe(dev_priv, pipe) {
4d800030
ID
3104 enum intel_display_power_domain power_domain;
3105
3106 power_domain = POWER_DOMAIN_PIPE(pipe);
3107 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b10f1b20
ML
3108 continue;
3109
dd740780 3110 for_each_plane(dev_priv, pipe, plane) {
a269c583
DL
3111 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
3112 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
3113 val);
3114 }
3115
3116 val = I915_READ(CUR_BUF_CFG(pipe));
4969d33e
MR
3117 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
3118 val);
4d800030
ID
3119
3120 intel_display_power_put(dev_priv, power_domain);
a269c583
DL
3121 }
3122}
3123
9c2f7a9d
KM
3124/*
3125 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3126 * The bspec defines downscale amount as:
3127 *
3128 * """
3129 * Horizontal down scale amount = maximum[1, Horizontal source size /
3130 * Horizontal destination size]
3131 * Vertical down scale amount = maximum[1, Vertical source size /
3132 * Vertical destination size]
3133 * Total down scale amount = Horizontal down scale amount *
3134 * Vertical down scale amount
3135 * """
3136 *
3137 * Return value is provided in 16.16 fixed point form to retain fractional part.
3138 * Caller should take care of dividing & rounding off the value.
3139 */
3140static uint32_t
3141skl_plane_downscale_amount(const struct intel_plane_state *pstate)
3142{
3143 uint32_t downscale_h, downscale_w;
3144 uint32_t src_w, src_h, dst_w, dst_h;
3145
936e71e3 3146 if (WARN_ON(!pstate->base.visible))
9c2f7a9d
KM
3147 return DRM_PLANE_HELPER_NO_SCALING;
3148
3149 /* n.b., src is 16.16 fixed point, dst is whole integer */
936e71e3
VS
3150 src_w = drm_rect_width(&pstate->base.src);
3151 src_h = drm_rect_height(&pstate->base.src);
3152 dst_w = drm_rect_width(&pstate->base.dst);
3153 dst_h = drm_rect_height(&pstate->base.dst);
9c2f7a9d
KM
3154 if (intel_rotation_90_or_270(pstate->base.rotation))
3155 swap(dst_w, dst_h);
3156
3157 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3158 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3159
3160 /* Provide result in 16.16 fixed point */
3161 return (uint64_t)downscale_w * downscale_h >> 16;
3162}
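/*
 * Worked example (illustrative): a 3840x2160 source scanned out at
 * 1920x1080 gives downscale_w = downscale_h = 0x20000 (2.0 in 16.16);
 * the total is (0x20000 * 0x20000) >> 16 = 0x40000, i.e. 4.0, so the
 * plane fetches four times the data per displayed pixel.
 */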
3163
b9cec075 3164static unsigned int
024c9045
MR
3165skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3166 const struct drm_plane_state *pstate,
3167 int y)
b9cec075 3168{
a280f7dd 3169 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
024c9045 3170 struct drm_framebuffer *fb = pstate->fb;
8d19d7d9 3171 uint32_t down_scale_amount, data_rate;
a280f7dd 3172 uint32_t width = 0, height = 0;
a1de91e5
MR
3173 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3174
936e71e3 3175 if (!intel_pstate->base.visible)
a1de91e5
MR
3176 return 0;
3177 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3178 return 0;
3179 if (y && format != DRM_FORMAT_NV12)
3180 return 0;
a280f7dd 3181
936e71e3
VS
3182 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3183 height = drm_rect_height(&intel_pstate->base.src) >> 16;
a280f7dd
KM
3184
3185 if (intel_rotation_90_or_270(pstate->rotation))
3186 swap(width, height);
2cd601c6
CK
3187
3188 /* for planar format */
a1de91e5 3189 if (format == DRM_FORMAT_NV12) {
2cd601c6 3190 if (y) /* y-plane data rate */
8d19d7d9 3191 data_rate = width * height *
a1de91e5 3192 drm_format_plane_cpp(format, 0);
2cd601c6 3193 else /* uv-plane data rate */
8d19d7d9 3194 data_rate = (width / 2) * (height / 2) *
a1de91e5 3195 drm_format_plane_cpp(format, 1);
8d19d7d9
KM
3196 } else {
3197 /* for packed formats */
3198 data_rate = width * height * drm_format_plane_cpp(format, 0);
2cd601c6
CK
3199 }
3200
8d19d7d9
KM
3201 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
3202
3203 return (uint64_t)data_rate * down_scale_amount >> 16;
b9cec075
DL
3204}
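/*
 * Worked example (illustrative): a 1920x1080 NV12 plane contributes
 * 1920 * 1080 * 1 = 2073600 on the y-plane pass and
 * (1920 / 2) * (1080 / 2) * 2 = 1036800 on the uv-plane pass, before
 * the 16.16 downscale factor is applied.
 */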
3205
3206/*
3207 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3208 * a 8192x4096@32bpp framebuffer:
3209 * 3 * 4096 * 8192 * 4 < 2^32
3210 */
3211static unsigned int
9c74d826 3212skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
b9cec075 3213{
9c74d826
MR
3214 struct drm_crtc_state *cstate = &intel_cstate->base;
3215 struct drm_atomic_state *state = cstate->state;
3216 struct drm_crtc *crtc = cstate->crtc;
3217 struct drm_device *dev = crtc->dev;
3218 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
a6d3460e 3219 const struct drm_plane *plane;
024c9045 3220 const struct intel_plane *intel_plane;
a6d3460e 3221 struct drm_plane_state *pstate;
a1de91e5 3222 unsigned int rate, total_data_rate = 0;
9c74d826 3223 int id;
a6d3460e
MR
3224 int i;
3225
3226 if (WARN_ON(!state))
3227 return 0;
b9cec075 3228
a1de91e5 3229 /* Calculate and cache data rate for each plane */
a6d3460e
MR
3230 for_each_plane_in_state(state, plane, pstate, i) {
3231 id = skl_wm_plane_id(to_intel_plane(plane));
3232 intel_plane = to_intel_plane(plane);
3233
3234 if (intel_plane->pipe != intel_crtc->pipe)
3235 continue;
3236
3237 /* packed/uv */
3238 rate = skl_plane_relative_data_rate(intel_cstate,
3239 pstate, 0);
3240 intel_cstate->wm.skl.plane_data_rate[id] = rate;
3241
3242 /* y-plane */
3243 rate = skl_plane_relative_data_rate(intel_cstate,
3244 pstate, 1);
3245 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
a1de91e5 3246 }
024c9045 3247
a1de91e5
MR
3248 /* Calculate CRTC's total data rate from cached values */
3249 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3250 int id = skl_wm_plane_id(intel_plane);
024c9045 3251
a1de91e5 3252 /* packed/uv */
9c74d826
MR
3253 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3254 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
b9cec075
DL
3255 }
3256
3257 return total_data_rate;
3258}
3259
cbcfd14b
KM
3260static uint16_t
3261skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3262 const int y)
3263{
3264 struct drm_framebuffer *fb = pstate->fb;
3265 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3266 uint32_t src_w, src_h;
3267 uint32_t min_scanlines = 8;
3268 uint8_t plane_bpp;
3269
3270 if (WARN_ON(!fb))
3271 return 0;
3272
3273 /* For packed formats, no y-plane, return 0 */
3274 if (y && fb->pixel_format != DRM_FORMAT_NV12)
3275 return 0;
3276
3277 /* For non Y-tiled formats, return the 8-block minimum */
3278 if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3279 fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3280 return 8;
3281
936e71e3
VS
3282 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3283 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
cbcfd14b
KM
3284
3285 if (intel_rotation_90_or_270(pstate->rotation))
3286 swap(src_w, src_h);
3287
3288 /* Halve UV plane width and height for NV12 */
3289 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3290 src_w /= 2;
3291 src_h /= 2;
3292 }
3293
3294 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3295 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3296 else
3297 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3298
3299 if (intel_rotation_90_or_270(pstate->rotation)) {
3300 switch (plane_bpp) {
3301 case 1:
3302 min_scanlines = 32;
3303 break;
3304 case 2:
3305 min_scanlines = 16;
3306 break;
3307 case 4:
3308 min_scanlines = 8;
3309 break;
3310 case 8:
3311 min_scanlines = 4;
3312 break;
3313 default:
3314 WARN(1, "Unsupported pixel depth %u for rotation",
3315 plane_bpp);
3316 min_scanlines = 32;
3317 }
3318 }
3319
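 /*
  * Illustrative example (values assumed): a Y-tiled plane with
  * src_w = 1920, plane_bpp = 4 and the default min_scanlines = 8
  * gives DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 =
  * 123 blocks.
  */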
3320 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
3321}
3322
c107acfe 3323static int
024c9045 3324skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
b9cec075
DL
3325 struct skl_ddb_allocation *ddb /* out */)
3326{
c107acfe 3327 struct drm_atomic_state *state = cstate->base.state;
024c9045 3328 struct drm_crtc *crtc = cstate->base.crtc;
b9cec075
DL
3329 struct drm_device *dev = crtc->dev;
3330 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
024c9045 3331 struct intel_plane *intel_plane;
c107acfe
MR
3332 struct drm_plane *plane;
3333 struct drm_plane_state *pstate;
b9cec075 3334 enum pipe pipe = intel_crtc->pipe;
34bb56af 3335 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
b9cec075 3336 uint16_t alloc_size, start, cursor_blocks;
86a2100a
MR
3337 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3338 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
b9cec075 3339 unsigned int total_data_rate;
c107acfe
MR
3340 int num_active;
3341 int id, i;
b9cec075 3342
a6d3460e
MR
3343 if (WARN_ON(!state))
3344 return 0;
3345
c107acfe
MR
3346 if (!cstate->base.active) {
3347 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3348 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3349 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3350 return 0;
3351 }
3352
a6d3460e 3353 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
34bb56af 3354 alloc_size = skl_ddb_entry_size(alloc);
b9cec075
DL
3355 if (alloc_size == 0) {
3356 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
c107acfe 3357 return 0;
b9cec075
DL
3358 }
3359
c107acfe 3360 cursor_blocks = skl_cursor_allocation(num_active);
4969d33e
MR
3361 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3362 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
b9cec075
DL
3363
3364 alloc_size -= cursor_blocks;
b9cec075 3365
80958155 3366 /* 1. Allocate the minimum required blocks for each active plane */
a6d3460e
MR
3367 for_each_plane_in_state(state, plane, pstate, i) {
3368 intel_plane = to_intel_plane(plane);
3369 id = skl_wm_plane_id(intel_plane);
c107acfe 3370
a6d3460e
MR
3371 if (intel_plane->pipe != pipe)
3372 continue;
c107acfe 3373
936e71e3 3374 if (!to_intel_plane_state(pstate)->base.visible) {
a6d3460e
MR
3375 minimum[id] = 0;
3376 y_minimum[id] = 0;
3377 continue;
3378 }
3379 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3380 minimum[id] = 0;
3381 y_minimum[id] = 0;
3382 continue;
c107acfe 3383 }
a6d3460e 3384
cbcfd14b
KM
3385 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3386 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
c107acfe 3387 }
80958155 3388
c107acfe
MR
3389 for (i = 0; i < PLANE_CURSOR; i++) {
3390 alloc_size -= minimum[i];
3391 alloc_size -= y_minimum[i];
80958155
DL
3392 }
3393
b9cec075 3394 /*
80958155
DL
3395 * 2. Distribute the remaining space in proportion to the amount of
3396 * data each plane needs to fetch from memory.
b9cec075
DL
3397 *
3398 * FIXME: we may not allocate every single block here.
3399 */
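 /*
  * E.g. (made-up numbers): with 400 blocks left over after the
  * minimums and a plane whose data_rate is a quarter of
  * total_data_rate, the loop below hands that plane
  * minimum[id] + 400 / 4 = minimum[id] + 100 blocks.
  */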
024c9045 3400 total_data_rate = skl_get_total_relative_data_rate(cstate);
a1de91e5 3401 if (total_data_rate == 0)
c107acfe 3402 return 0;
b9cec075 3403
34bb56af 3404 start = alloc->start;
024c9045 3405 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2cd601c6
CK
3406 unsigned int data_rate, y_data_rate;
3407 uint16_t plane_blocks, y_plane_blocks = 0;
024c9045 3408 int id = skl_wm_plane_id(intel_plane);
b9cec075 3409
a1de91e5 3410 data_rate = cstate->wm.skl.plane_data_rate[id];
b9cec075
DL
3411
3412 /*
2cd601c6 3413 * allocation for packed formats, or the uv-plane part of a planar format:
b9cec075
DL
3414 * promote the expression to 64 bits to avoid overflowing; the
3415 * result is less than the space available, since data_rate / total_data_rate < 1
3416 */
024c9045 3417 plane_blocks = minimum[id];
80958155
DL
3418 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3419 total_data_rate);
b9cec075 3420
c107acfe
MR
3421 /* Leave disabled planes at (0,0) */
3422 if (data_rate) {
3423 ddb->plane[pipe][id].start = start;
3424 ddb->plane[pipe][id].end = start + plane_blocks;
3425 }
b9cec075
DL
3426
3427 start += plane_blocks;
2cd601c6
CK
3428
3429 /*
3430 * allocation for the y-plane part of a planar format:
3431 */
a1de91e5
MR
3432 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3433
3434 y_plane_blocks = y_minimum[id];
3435 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3436 total_data_rate);
2cd601c6 3437
c107acfe
MR
3438 if (y_data_rate) {
3439 ddb->y_plane[pipe][id].start = start;
3440 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3441 }
a1de91e5
MR
3442
3443 start += y_plane_blocks;
b9cec075
DL
3444 }
3445
c107acfe 3446 return 0;
b9cec075
DL
3447}
3448
5cec258b 3449static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
2d41c0b5
PB
3450{
3451 /* TODO: Take into account the scalers once we support them */
2d112de7 3452 return config->base.adjusted_mode.crtc_clock;
2d41c0b5
PB
3453}
3454
3455/*
3456 * The max latency should be 257us (the maximum the punit can encode is 255 and
ac484963 3457 * we add 2us for the read latency), and cpp should always be <= 8, so that
2d41c0b5
PB
3458 * should allow a pixel_rate of up to ~2 GHz, which seems sufficient since the
3459 * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3460*/
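/*
 * Back-of-the-envelope check of the claim above, with assumed worst
 * case values: latency = 257us, pixel_rate = 2000000 (kHz) and
 * cpp = 8 give a peak intermediate product of 257 * 2000000 * 8 =
 * 4112000000, which still fits in 32 bits, and 4112000000 / 512 =
 * 8031250 blocks before the final divide by 1000.
 */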
ac484963 3461static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
2d41c0b5
PB
3462{
3463 uint32_t wm_intermediate_val, ret;
3464
3465 if (latency == 0)
3466 return UINT_MAX;
3467
ac484963 3468 wm_intermediate_val = latency * pixel_rate * cpp / 512;
2d41c0b5
PB
3469 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3470
3471 return ret;
3472}
3473
3474static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
ac484963 3475 uint32_t horiz_pixels, uint8_t cpp,
0fda6568 3476 uint64_t tiling, uint32_t latency)
2d41c0b5 3477{
d4c2aa60
TU
3478 uint32_t ret;
3479 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3480 uint32_t wm_intermediate_val;
2d41c0b5
PB
3481
3482 if (latency == 0)
3483 return UINT_MAX;
3484
ac484963 3485 plane_bytes_per_line = horiz_pixels * cpp;
0fda6568
TU
3486
3487 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3488 tiling == I915_FORMAT_MOD_Yf_TILED) {
3489 plane_bytes_per_line *= 4;
3490 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3491 plane_blocks_per_line /= 4;
055c3ff6
MR
3492 } else if (tiling == DRM_FORMAT_MOD_NONE) {
3493 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
0fda6568
TU
3494 } else {
3495 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3496 }
3497
2d41c0b5
PB
3498 wm_intermediate_val = latency * pixel_rate;
3499 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
d4c2aa60 3500 plane_blocks_per_line;
2d41c0b5
PB
3501
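 /*
  * Method 2 is effectively "latency rounded up to whole lines, times
  * blocks per line". With assumed numbers: latency = 15us,
  * pixel_rate = 148500 kHz, htotal = 2200 and a linear 1920-wide
  * 32bpp plane (15 + 1 = 16 blocks per line) this gives
  * DIV_ROUND_UP(15 * 148500, 2200 * 1000) * 16 = 2 * 16 = 32 blocks.
  */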
3502 return ret;
3503}
3504
9c2f7a9d
KM
3505static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3506 struct intel_plane_state *pstate)
3507{
3508 uint64_t adjusted_pixel_rate;
3509 uint64_t downscale_amount;
3510 uint64_t pixel_rate;
3511
3512 /* Shouldn't reach here on disabled planes... */
936e71e3 3513 if (WARN_ON(!pstate->base.visible))
9c2f7a9d
KM
3514 return 0;
3515
3516 /*
3517 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3518 * with additional adjustments for plane-specific scaling.
3519 */
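 /*
  * downscale_amount is a 16.16 fixed point factor, so e.g. a 2x
  * downscale arrives as (2 << 16) and the shift below turns
  * adjusted_pixel_rate * (2 << 16) back into twice the pixel rate
  * (example values assumed, not taken from the hardware spec).
  */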
3520 adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
3521 downscale_amount = skl_plane_downscale_amount(pstate);
3522
3523 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3524 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3525
3526 return pixel_rate;
3527}
3528
55994c2c
MR
3529static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3530 struct intel_crtc_state *cstate,
3531 struct intel_plane_state *intel_pstate,
3532 uint16_t ddb_allocation,
3533 int level,
3534 uint16_t *out_blocks, /* out */
3535 uint8_t *out_lines, /* out */
3536 bool *enabled /* out */)
2d41c0b5 3537{
33815fa5
MR
3538 struct drm_plane_state *pstate = &intel_pstate->base;
3539 struct drm_framebuffer *fb = pstate->fb;
d4c2aa60
TU
3540 uint32_t latency = dev_priv->wm.skl_latency[level];
3541 uint32_t method1, method2;
3542 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3543 uint32_t res_blocks, res_lines;
3544 uint32_t selected_result;
ac484963 3545 uint8_t cpp;
a280f7dd 3546 uint32_t width = 0, height = 0;
9c2f7a9d 3547 uint32_t plane_pixel_rate;
2d41c0b5 3548
936e71e3 3549 if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
55994c2c
MR
3550 *enabled = false;
3551 return 0;
3552 }
2d41c0b5 3553
936e71e3
VS
3554 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3555 height = drm_rect_height(&intel_pstate->base.src) >> 16;
a280f7dd 3556
33815fa5 3557 if (intel_rotation_90_or_270(pstate->rotation))
a280f7dd
KM
3558 swap(width, height);
3559
ac484963 3560 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
9c2f7a9d
KM
3561 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3562
3563 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3564 method2 = skl_wm_method2(plane_pixel_rate,
024c9045 3565 cstate->base.adjusted_mode.crtc_htotal,
a280f7dd
KM
3566 width,
3567 cpp,
3568 fb->modifier[0],
d4c2aa60 3569 latency);
2d41c0b5 3570
a280f7dd 3571 plane_bytes_per_line = width * cpp;
d4c2aa60 3572 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2d41c0b5 3573
024c9045
MR
3574 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3575 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
1fc0a8f7
TU
3576 uint32_t min_scanlines = 4;
3577 uint32_t y_tile_minimum;
33815fa5 3578 if (intel_rotation_90_or_270(pstate->rotation)) {
ac484963 3579 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
024c9045
MR
3580 drm_format_plane_cpp(fb->pixel_format, 1) :
3581 drm_format_plane_cpp(fb->pixel_format, 0);
3582
ac484963 3583 switch (cpp) {
1fc0a8f7
TU
3584 case 1:
3585 min_scanlines = 16;
3586 break;
3587 case 2:
3588 min_scanlines = 8;
3589 break;
3590 case 8:
3591 WARN(1, "Unsupported pixel depth for rotation");
2f0b5790 3592 }
1fc0a8f7
TU
3593 }
3594 y_tile_minimum = plane_blocks_per_line * min_scanlines;
0fda6568
TU
3595 selected_result = max(method2, y_tile_minimum);
3596 } else {
3597 if ((ddb_allocation / plane_blocks_per_line) >= 1)
3598 selected_result = min(method1, method2);
3599 else
3600 selected_result = method1;
3601 }
2d41c0b5 3602
d4c2aa60
TU
3603 res_blocks = selected_result + 1;
3604 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
e6d66171 3605
0fda6568 3606 if (level >= 1 && level <= 7) {
024c9045
MR
3607 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3608 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
0fda6568
TU
3609 res_lines += 4;
3610 else
3611 res_blocks++;
3612 }
e6d66171 3613
55994c2c
MR
3614 if (res_blocks >= ddb_allocation || res_lines > 31) {
3615 *enabled = false;
6b6bada7
MR
3616
3617 /*
3618 * If there are no valid level 0 watermarks, then we can't
3619 * support this display configuration.
3620 */
3621 if (level) {
3622 return 0;
3623 } else {
3624 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3625 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3626 to_intel_crtc(cstate->base.crtc)->pipe,
3627 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3628 res_blocks, ddb_allocation, res_lines);
3629
3630 return -EINVAL;
3631 }
55994c2c 3632 }
e6d66171
DL
3633
3634 *out_blocks = res_blocks;
3635 *out_lines = res_lines;
55994c2c 3636 *enabled = true;
2d41c0b5 3637
55994c2c 3638 return 0;
2d41c0b5
PB
3639}
3640
f4a96752
MR
3641static int
3642skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3643 struct skl_ddb_allocation *ddb,
3644 struct intel_crtc_state *cstate,
3645 int level,
3646 struct skl_wm_level *result)
2d41c0b5 3647{
f4a96752 3648 struct drm_atomic_state *state = cstate->base.state;
024c9045 3649 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
f4a96752 3650 struct drm_plane *plane;
024c9045 3651 struct intel_plane *intel_plane;
33815fa5 3652 struct intel_plane_state *intel_pstate;
2d41c0b5 3653 uint16_t ddb_blocks;
024c9045 3654 enum pipe pipe = intel_crtc->pipe;
55994c2c 3655 int ret;
024c9045 3656
f4a96752
MR
3657 /*
3658 * We'll only calculate watermarks for planes that are actually
3659 * enabled, so make sure all other planes are set as disabled.
3660 */
3661 memset(result, 0, sizeof(*result));
3662
91c8a326
CW
3663 for_each_intel_plane_mask(&dev_priv->drm,
3664 intel_plane,
3665 cstate->base.plane_mask) {
024c9045 3666 int i = skl_wm_plane_id(intel_plane);
2d41c0b5 3667
f4a96752
MR
3668 plane = &intel_plane->base;
3669 intel_pstate = NULL;
3670 if (state)
3671 intel_pstate =
3672 intel_atomic_get_existing_plane_state(state,
3673 intel_plane);
3674
3675 /*
3676 * Note: If we start supporting multiple pending atomic commits
3677 * against the same planes/CRTC's in the future, plane->state
3678 * will no longer be the correct pre-state to use for the
3679 * calculations here and we'll need to change where we get the
3680 * 'unchanged' plane data from.
3681 *
3682 * For now this is fine because we only allow one queued commit
3683 * against a CRTC. Even if the plane isn't modified by this
3684 * transaction and we don't have a plane lock, we still have
3685 * the CRTC's lock, so we know that no other transactions are
3686 * racing with us to update it.
3687 */
3688 if (!intel_pstate)
3689 intel_pstate = to_intel_plane_state(plane->state);
3690
3691 WARN_ON(!intel_pstate->base.fb);
3692
2d41c0b5
PB
3693 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3694
55994c2c
MR
3695 ret = skl_compute_plane_wm(dev_priv,
3696 cstate,
3697 intel_pstate,
3698 ddb_blocks,
3699 level,
3700 &result->plane_res_b[i],
3701 &result->plane_res_l[i],
3702 &result->plane_en[i]);
3703 if (ret)
3704 return ret;
2d41c0b5 3705 }
f4a96752
MR
3706
3707 return 0;
2d41c0b5
PB
3708}
3709
407b50f3 3710static uint32_t
024c9045 3711skl_compute_linetime_wm(struct intel_crtc_state *cstate)
407b50f3 3712{
024c9045 3713 if (!cstate->base.active)
407b50f3
DL
3714 return 0;
3715
024c9045 3716 if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
661abfc0 3717 return 0;
407b50f3 3718
024c9045
MR
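 /*
  * This is the line time in units of 1/8 us, as the factor of 8
  * suggests; e.g. (assumed values) htotal = 2200 at 148500 kHz gives
  * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119.
  */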
3719 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3720 skl_pipe_pixel_rate(cstate));
407b50f3
DL
3721}
3722
024c9045 3723static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
9414f563 3724 struct skl_wm_level *trans_wm /* out */)
407b50f3 3725{
024c9045 3726 struct drm_crtc *crtc = cstate->base.crtc;
9414f563 3727 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
024c9045 3728 struct intel_plane *intel_plane;
9414f563 3729
024c9045 3730 if (!cstate->base.active)
407b50f3 3731 return;
9414f563
DL
3732
3733 /* Until we know more, just disable transition WMs */
024c9045
MR
3734 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3735 int i = skl_wm_plane_id(intel_plane);
3736
9414f563 3737 trans_wm->plane_en[i] = false;
024c9045 3738 }
407b50f3
DL
3739}
3740
55994c2c
MR
3741static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3742 struct skl_ddb_allocation *ddb,
3743 struct skl_pipe_wm *pipe_wm)
2d41c0b5 3744{
024c9045 3745 struct drm_device *dev = cstate->base.crtc->dev;
fac5e23e 3746 const struct drm_i915_private *dev_priv = to_i915(dev);
2d41c0b5 3747 int level, max_level = ilk_wm_max_level(dev);
55994c2c 3748 int ret;
2d41c0b5
PB
3749
3750 for (level = 0; level <= max_level; level++) {
55994c2c
MR
3751 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3752 level, &pipe_wm->wm[level]);
3753 if (ret)
3754 return ret;
2d41c0b5 3755 }
024c9045 3756 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
2d41c0b5 3757
024c9045 3758 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
55994c2c
MR
3759
3760 return 0;
2d41c0b5
PB
3761}
3762
3763static void skl_compute_wm_results(struct drm_device *dev,
2d41c0b5
PB
3764 struct skl_pipe_wm *p_wm,
3765 struct skl_wm_values *r,
3766 struct intel_crtc *intel_crtc)
3767{
3768 int level, max_level = ilk_wm_max_level(dev);
3769 enum pipe pipe = intel_crtc->pipe;
9414f563
DL
3770 uint32_t temp;
3771 int i;
2d41c0b5
PB
3772
3773 for (level = 0; level <= max_level; level++) {
2d41c0b5
PB
3774 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3775 temp = 0;
2d41c0b5
PB
3776
3777 temp |= p_wm->wm[level].plane_res_l[i] <<
3778 PLANE_WM_LINES_SHIFT;
3779 temp |= p_wm->wm[level].plane_res_b[i];
3780 if (p_wm->wm[level].plane_en[i])
3781 temp |= PLANE_WM_EN;
3782
3783 r->plane[pipe][i][level] = temp;
2d41c0b5
PB
3784 }
3785
3786 temp = 0;
2d41c0b5 3787
4969d33e
MR
3788 temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3789 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
2d41c0b5 3790
4969d33e 3791 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
2d41c0b5
PB
3792 temp |= PLANE_WM_EN;
3793
4969d33e 3794 r->plane[pipe][PLANE_CURSOR][level] = temp;
2d41c0b5
PB
3795
3796 }
3797
9414f563
DL
3798 /* transition WMs */
3799 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3800 temp = 0;
3801 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3802 temp |= p_wm->trans_wm.plane_res_b[i];
3803 if (p_wm->trans_wm.plane_en[i])
3804 temp |= PLANE_WM_EN;
3805
3806 r->plane_trans[pipe][i] = temp;
3807 }
3808
3809 temp = 0;
4969d33e
MR
3810 temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3811 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3812 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
9414f563
DL
3813 temp |= PLANE_WM_EN;
3814
4969d33e 3815 r->plane_trans[pipe][PLANE_CURSOR] = temp;
9414f563 3816
2d41c0b5
PB
3817 r->wm_linetime[pipe] = p_wm->linetime;
3818}
3819
f0f59a00
VS
3820static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3821 i915_reg_t reg,
16160e3d
DL
3822 const struct skl_ddb_entry *entry)
3823{
3824 if (entry->end)
3825 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3826 else
3827 I915_WRITE(reg, 0);
3828}
3829
62e0fb88
L
3830void skl_write_plane_wm(struct intel_crtc *intel_crtc,
3831 const struct skl_wm_values *wm,
3832 int plane)
3833{
3834 struct drm_crtc *crtc = &intel_crtc->base;
3835 struct drm_device *dev = crtc->dev;
3836 struct drm_i915_private *dev_priv = to_i915(dev);
3837 int level, max_level = ilk_wm_max_level(dev);
3838 enum pipe pipe = intel_crtc->pipe;
3839
3840 for (level = 0; level <= max_level; level++) {
3841 I915_WRITE(PLANE_WM(pipe, plane, level),
3842 wm->plane[pipe][plane][level]);
3843 }
3844 I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
27082493
L
3845
3846 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
3847 &wm->ddb.plane[pipe][plane]);
3848 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
3849 &wm->ddb.y_plane[pipe][plane]);
62e0fb88
L
3850}
3851
3852void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
3853 const struct skl_wm_values *wm)
3854{
3855 struct drm_crtc *crtc = &intel_crtc->base;
3856 struct drm_device *dev = crtc->dev;
3857 struct drm_i915_private *dev_priv = to_i915(dev);
3858 int level, max_level = ilk_wm_max_level(dev);
3859 enum pipe pipe = intel_crtc->pipe;
3860
3861 for (level = 0; level <= max_level; level++) {
3862 I915_WRITE(CUR_WM(pipe, level),
3863 wm->plane[pipe][PLANE_CURSOR][level]);
3864 }
3865 I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
5d374d96 3866
27082493
L
3867 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3868 &wm->ddb.plane[pipe][PLANE_CURSOR]);
2d41c0b5
PB
3869}
3870
27082493
L
3871bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
3872 const struct skl_ddb_allocation *new,
3873 enum pipe pipe)
0e8fb7ba 3874{
27082493
L
3875 return new->pipe[pipe].start == old->pipe[pipe].start &&
3876 new->pipe[pipe].end == old->pipe[pipe].end;
0e8fb7ba
DL
3877}
3878
27082493
L
3879static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
3880 const struct skl_ddb_entry *b)
0e8fb7ba 3881{
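 /*
  * Standard exclusive-end interval test; e.g. [0, 100) and
  * [100, 200) do not overlap, while [0, 101) and [100, 200) do.
  */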
27082493 3882 return a->start < b->end && b->start < a->end;
0e8fb7ba
DL
3883}
3884
27082493
L
3885bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
3886 const struct skl_ddb_allocation *old,
3887 const struct skl_ddb_allocation *new,
3888 enum pipe pipe)
0e8fb7ba 3889{
27082493
L
3890 struct drm_device *dev = state->dev;
3891 struct intel_crtc *intel_crtc;
3892 enum pipe otherp;
0e8fb7ba 3893
27082493
L
3894 for_each_intel_crtc(dev, intel_crtc) {
3895 otherp = intel_crtc->pipe;
0e8fb7ba 3896
27082493 3897 if (otherp == pipe)
0e8fb7ba
DL
3898 continue;
3899
27082493
L
3900 if (skl_ddb_entries_overlap(&new->pipe[pipe],
3901 &old->pipe[otherp]))
3902 return true;
0e8fb7ba
DL
3903 }
3904
27082493 3905 return false;
0e8fb7ba
DL
3906}
3907
55994c2c
MR
3908static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3909 struct skl_ddb_allocation *ddb, /* out */
3910 struct skl_pipe_wm *pipe_wm, /* out */
3911 bool *changed /* out */)
2d41c0b5 3912{
f4a96752
MR
3913 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3914 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
55994c2c 3915 int ret;
2d41c0b5 3916
55994c2c
MR
3917 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3918 if (ret)
3919 return ret;
2d41c0b5 3920
4e0963c7 3921 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
55994c2c
MR
3922 *changed = false;
3923 else
3924 *changed = true;
2d41c0b5 3925
55994c2c 3926 return 0;
2d41c0b5
PB
3927}
3928
9b613022
MR
3929static uint32_t
3930pipes_modified(struct drm_atomic_state *state)
3931{
3932 struct drm_crtc *crtc;
3933 struct drm_crtc_state *cstate;
3934 uint32_t i, ret = 0;
3935
3936 for_each_crtc_in_state(state, crtc, cstate, i)
3937 ret |= drm_crtc_mask(crtc);
3938
3939 return ret;
3940}
3941
98d39494
MR
3942static int
3943skl_compute_ddb(struct drm_atomic_state *state)
3944{
3945 struct drm_device *dev = state->dev;
3946 struct drm_i915_private *dev_priv = to_i915(dev);
3947 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3948 struct intel_crtc *intel_crtc;
734fa01f 3949 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
9b613022 3950 uint32_t realloc_pipes = pipes_modified(state);
98d39494
MR
3951 int ret;
3952
3953 /*
3954 * If this is our first atomic update following hardware readout,
3955 * we can't trust the DDB that the BIOS programmed for us. Let's
3956 * pretend that all pipes switched active status so that we'll
3957 * ensure a full DDB recompute.
3958 */
1b54a880
MR
3959 if (dev_priv->wm.distrust_bios_wm) {
3960 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
3961 state->acquire_ctx);
3962 if (ret)
3963 return ret;
3964
98d39494
MR
3965 intel_state->active_pipe_changes = ~0;
3966
1b54a880
MR
3967 /*
3968 * We usually only initialize intel_state->active_crtcs if we're
3969 * doing a modeset; make sure this field is always
3970 * initialized during the sanitization process that happens
3971 * on the first commit too.
3972 */
3973 if (!intel_state->modeset)
3974 intel_state->active_crtcs = dev_priv->active_crtcs;
3975 }
3976
98d39494
MR
3977 /*
3978 * If the modeset changes which CRTC's are active, we need to
3979 * recompute the DDB allocation for *all* active pipes, even
3980 * those that weren't otherwise being modified in any way by this
3981 * atomic commit. Due to the shrinking of the per-pipe allocations
3982 * when new active CRTC's are added, it's possible for a pipe that
3983 * we were already using and aren't changing at all here to suddenly
3984 * become invalid if its DDB needs exceeds its new allocation.
3985 *
3986 * Note that if we wind up doing a full DDB recompute, we can't let
3987 * any other display updates race with this transaction, so we need
3988 * to grab the lock on *all* CRTC's.
3989 */
734fa01f 3990 if (intel_state->active_pipe_changes) {
98d39494 3991 realloc_pipes = ~0;
734fa01f
MR
3992 intel_state->wm_results.dirty_pipes = ~0;
3993 }
98d39494
MR
3994
3995 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
3996 struct intel_crtc_state *cstate;
3997
3998 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
3999 if (IS_ERR(cstate))
4000 return PTR_ERR(cstate);
4001
734fa01f 4002 ret = skl_allocate_pipe_ddb(cstate, ddb);
98d39494
MR
4003 if (ret)
4004 return ret;
05a76d3d
L
4005
4006 ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
4007 if (ret)
4008 return ret;
98d39494
MR
4009 }
4010
4011 return 0;
4012}
4013
2722efb9
MR
4014static void
4015skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4016 struct skl_wm_values *src,
4017 enum pipe pipe)
4018{
4019 dst->wm_linetime[pipe] = src->wm_linetime[pipe];
4020 memcpy(dst->plane[pipe], src->plane[pipe],
4021 sizeof(dst->plane[pipe]));
4022 memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
4023 sizeof(dst->plane_trans[pipe]));
4024
4025 dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
4026 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4027 sizeof(dst->ddb.y_plane[pipe]));
4028 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4029 sizeof(dst->ddb.plane[pipe]));
4030}
4031
98d39494
MR
4032static int
4033skl_compute_wm(struct drm_atomic_state *state)
4034{
4035 struct drm_crtc *crtc;
4036 struct drm_crtc_state *cstate;
734fa01f
MR
4037 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4038 struct skl_wm_values *results = &intel_state->wm_results;
4039 struct skl_pipe_wm *pipe_wm;
98d39494 4040 bool changed = false;
734fa01f 4041 int ret, i;
98d39494
MR
4042
4043 /*
4044 * If this transaction isn't actually touching any CRTC's, don't
4045 * bother with watermark calculation. Note that if we pass this
4046 * test, we're guaranteed to hold at least one CRTC state mutex,
4047 * which means we can safely use values like dev_priv->active_crtcs
4048 * since any racing commits that want to update them would need to
4049 * hold _all_ CRTC state mutexes.
4050 */
4051 for_each_crtc_in_state(state, crtc, cstate, i)
4052 changed = true;
4053 if (!changed)
4054 return 0;
4055
734fa01f
MR
4056 /* Clear all dirty flags */
4057 results->dirty_pipes = 0;
4058
98d39494
MR
4059 ret = skl_compute_ddb(state);
4060 if (ret)
4061 return ret;
4062
734fa01f
MR
4063 /*
4064 * Calculate WM's for all pipes that are part of this transaction.
4065 * Note that the DDB allocation above may have added more CRTC's that
4066 * weren't otherwise being modified (and set bits in dirty_pipes) if
4067 * pipe allocations had to change.
4068 *
4069 * FIXME: Now that we're doing this in the atomic check phase, we
4070 * should allow skl_update_pipe_wm() to return failure in cases where
4071 * no suitable watermark values can be found.
4072 */
4073 for_each_crtc_in_state(state, crtc, cstate, i) {
4074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4075 struct intel_crtc_state *intel_cstate =
4076 to_intel_crtc_state(cstate);
4077
4078 pipe_wm = &intel_cstate->wm.skl.optimal;
4079 ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
4080 &changed);
4081 if (ret)
4082 return ret;
4083
4084 if (changed)
4085 results->dirty_pipes |= drm_crtc_mask(crtc);
4086
4087 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4088 /* This pipe's WM's did not change */
4089 continue;
4090
4091 intel_cstate->update_wm_pre = true;
4092 skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
4093 }
4094
98d39494
MR
4095 return 0;
4096}
4097
2d41c0b5
PB
4098static void skl_update_wm(struct drm_crtc *crtc)
4099{
4100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4101 struct drm_device *dev = crtc->dev;
fac5e23e 4102 struct drm_i915_private *dev_priv = to_i915(dev);
2d41c0b5 4103 struct skl_wm_values *results = &dev_priv->wm.skl_results;
2722efb9 4104 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
4e0963c7 4105 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
e8f1f02e 4106 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
27082493 4107 enum pipe pipe = intel_crtc->pipe;
adda50b8 4108
734fa01f 4109 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
2d41c0b5
PB
4110 return;
4111
734fa01f
MR
4112 intel_crtc->wm.active.skl = *pipe_wm;
4113
4114 mutex_lock(&dev_priv->wm.wm_mutex);
2d41c0b5 4115
2722efb9 4116 /*
27082493
L
4117 * If this pipe isn't active already, we're going to be enabling it
4118 * very soon. Since it's safe to update a pipe's ddb allocation while
4119 * the pipe's shut off, just do so here. Already active pipes will have
4120 * their watermarks updated once we update their planes.
2722efb9 4121 */
27082493
L
4122 if (crtc->state->active_changed) {
4123 int plane;
4124
4125 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
4126 skl_write_plane_wm(intel_crtc, results, plane);
4127
4128 skl_write_cursor_wm(intel_crtc, results);
4129 }
4130
4131 skl_copy_wm_for_pipe(hw_vals, results, pipe);
734fa01f
MR
4132
4133 mutex_unlock(&dev_priv->wm.wm_mutex);
2d41c0b5
PB
4134}
4135
d890565c
VS
4136static void ilk_compute_wm_config(struct drm_device *dev,
4137 struct intel_wm_config *config)
4138{
4139 struct intel_crtc *crtc;
4140
4141 /* Compute the currently _active_ config */
4142 for_each_intel_crtc(dev, crtc) {
4143 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
4144
4145 if (!wm->pipe_enabled)
4146 continue;
4147
4148 config->sprites_enabled |= wm->sprites_enabled;
4149 config->sprites_scaled |= wm->sprites_scaled;
4150 config->num_pipes_active++;
4151 }
4152}
4153
ed4a6a7c 4154static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
801bcfff 4155{
91c8a326 4156 struct drm_device *dev = &dev_priv->drm;
b9d5c839 4157 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
820c1980 4158 struct ilk_wm_maximums max;
d890565c 4159 struct intel_wm_config config = {};
820c1980 4160 struct ilk_wm_values results = {};
77c122bc 4161 enum intel_ddb_partitioning partitioning;
261a27d1 4162
d890565c
VS
4163 ilk_compute_wm_config(dev, &config);
4164
4165 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
4166 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
a485bfb8
VS
4167
4168 /* 5/6 split only in single pipe config on IVB+ */
ec98c8d1 4169 if (INTEL_INFO(dev)->gen >= 7 &&
d890565c
VS
4170 config.num_pipes_active == 1 && config.sprites_enabled) {
4171 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
4172 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
0362c781 4173
820c1980 4174 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
861f3389 4175 } else {
198a1e9b 4176 best_lp_wm = &lp_wm_1_2;
861f3389
PZ
4177 }
4178
198a1e9b 4179 partitioning = (best_lp_wm == &lp_wm_1_2) ?
77c122bc 4180 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
801bcfff 4181
820c1980 4182 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
609cedef 4183
820c1980 4184 ilk_write_wm_values(dev_priv, &results);
1011d8c4
PZ
4185}
4186
ed4a6a7c 4187static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
b9d5c839 4188{
ed4a6a7c
MR
4189 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4190 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
b9d5c839 4191
ed4a6a7c 4192 mutex_lock(&dev_priv->wm.wm_mutex);
e8f1f02e 4193 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
ed4a6a7c
MR
4194 ilk_program_watermarks(dev_priv);
4195 mutex_unlock(&dev_priv->wm.wm_mutex);
4196}
bf220452 4197
ed4a6a7c
MR
4198static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
4199{
4200 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4201 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
bf220452 4202
ed4a6a7c
MR
4203 mutex_lock(&dev_priv->wm.wm_mutex);
4204 if (cstate->wm.need_postvbl_update) {
e8f1f02e 4205 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
ed4a6a7c
MR
4206 ilk_program_watermarks(dev_priv);
4207 }
4208 mutex_unlock(&dev_priv->wm.wm_mutex);
b9d5c839
VS
4209}
4210
3078999f
PB
4211static void skl_pipe_wm_active_state(uint32_t val,
4212 struct skl_pipe_wm *active,
4213 bool is_transwm,
4214 bool is_cursor,
4215 int i,
4216 int level)
4217{
4218 bool is_enabled = (val & PLANE_WM_EN) != 0;
4219
4220 if (!is_transwm) {
4221 if (!is_cursor) {
4222 active->wm[level].plane_en[i] = is_enabled;
4223 active->wm[level].plane_res_b[i] =
4224 val & PLANE_WM_BLOCKS_MASK;
4225 active->wm[level].plane_res_l[i] =
4226 (val >> PLANE_WM_LINES_SHIFT) &
4227 PLANE_WM_LINES_MASK;
4228 } else {
4969d33e
MR
4229 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
4230 active->wm[level].plane_res_b[PLANE_CURSOR] =
3078999f 4231 val & PLANE_WM_BLOCKS_MASK;
4969d33e 4232 active->wm[level].plane_res_l[PLANE_CURSOR] =
3078999f
PB
4233 (val >> PLANE_WM_LINES_SHIFT) &
4234 PLANE_WM_LINES_MASK;
4235 }
4236 } else {
4237 if (!is_cursor) {
4238 active->trans_wm.plane_en[i] = is_enabled;
4239 active->trans_wm.plane_res_b[i] =
4240 val & PLANE_WM_BLOCKS_MASK;
4241 active->trans_wm.plane_res_l[i] =
4242 (val >> PLANE_WM_LINES_SHIFT) &
4243 PLANE_WM_LINES_MASK;
4244 } else {
4969d33e
MR
4245 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
4246 active->trans_wm.plane_res_b[PLANE_CURSOR] =
3078999f 4247 val & PLANE_WM_BLOCKS_MASK;
4969d33e 4248 active->trans_wm.plane_res_l[PLANE_CURSOR] =
3078999f
PB
4249 (val >> PLANE_WM_LINES_SHIFT) &
4250 PLANE_WM_LINES_MASK;
4251 }
4252 }
4253}
4254
4255static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4256{
4257 struct drm_device *dev = crtc->dev;
fac5e23e 4258 struct drm_i915_private *dev_priv = to_i915(dev);
3078999f
PB
4259 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4e0963c7 4261 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
e8f1f02e 4262 struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
3078999f
PB
4263 enum pipe pipe = intel_crtc->pipe;
4264 int level, i, max_level;
4265 uint32_t temp;
4266
4267 max_level = ilk_wm_max_level(dev);
4268
4269 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4270
4271 for (level = 0; level <= max_level; level++) {
4272 for (i = 0; i < intel_num_planes(intel_crtc); i++)
4273 hw->plane[pipe][i][level] =
4274 I915_READ(PLANE_WM(pipe, i, level));
4969d33e 4275 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
3078999f
PB
4276 }
4277
4278 for (i = 0; i < intel_num_planes(intel_crtc); i++)
4279 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
4969d33e 4280 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
3078999f 4281
3ef00284 4282 if (!intel_crtc->active)
3078999f
PB
4283 return;
4284
2b4b9f35 4285 hw->dirty_pipes |= drm_crtc_mask(crtc);
3078999f
PB
4286
4287 active->linetime = hw->wm_linetime[pipe];
4288
4289 for (level = 0; level <= max_level; level++) {
4290 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4291 temp = hw->plane[pipe][i][level];
4292 skl_pipe_wm_active_state(temp, active, false,
4293 false, i, level);
4294 }
4969d33e 4295 temp = hw->plane[pipe][PLANE_CURSOR][level];
3078999f
PB
4296 skl_pipe_wm_active_state(temp, active, false, true, i, level);
4297 }
4298
4299 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4300 temp = hw->plane_trans[pipe][i];
4301 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
4302 }
4303
4969d33e 4304 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3078999f 4305 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
4e0963c7
MR
4306
4307 intel_crtc->wm.active.skl = *active;
3078999f
PB
4308}
4309
4310void skl_wm_get_hw_state(struct drm_device *dev)
4311{
fac5e23e 4312 struct drm_i915_private *dev_priv = to_i915(dev);
a269c583 4313 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3078999f
PB
4314 struct drm_crtc *crtc;
4315
a269c583 4316 skl_ddb_get_hw_state(dev_priv, ddb);
3078999f
PB
4317 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
4318 skl_pipe_wm_get_hw_state(crtc);
a1de91e5 4319
279e99d7
MR
4320 if (dev_priv->active_crtcs) {
4321 /* Fully recompute DDB on first atomic commit */
4322 dev_priv->wm.distrust_bios_wm = true;
4323 } else {
4324 /* Easy/common case; just sanitize DDB now if everything off */
4325 memset(ddb, 0, sizeof(*ddb));
4326 }
3078999f
PB
4327}
4328
243e6a44
VS
4329static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4330{
4331 struct drm_device *dev = crtc->dev;
fac5e23e 4332 struct drm_i915_private *dev_priv = to_i915(dev);
820c1980 4333 struct ilk_wm_values *hw = &dev_priv->wm.hw;
243e6a44 4334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4e0963c7 4335 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
e8f1f02e 4336 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
243e6a44 4337 enum pipe pipe = intel_crtc->pipe;
f0f59a00 4338 static const i915_reg_t wm0_pipe_reg[] = {
243e6a44
VS
4339 [PIPE_A] = WM0_PIPEA_ILK,
4340 [PIPE_B] = WM0_PIPEB_ILK,
4341 [PIPE_C] = WM0_PIPEC_IVB,
4342 };
4343
4344 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
a42a5719 4345 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ce0e0713 4346 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
243e6a44 4347
15606534
VS
4348 memset(active, 0, sizeof(*active));
4349
3ef00284 4350 active->pipe_enabled = intel_crtc->active;
2a44b76b
VS
4351
4352 if (active->pipe_enabled) {
243e6a44
VS
4353 u32 tmp = hw->wm_pipe[pipe];
4354
4355 /*
4356 * For active pipes the LP0 watermark is marked as
4357 * enabled, and LP1+ watermarks as disabled, since
4358 * we can't really reverse compute them in case
4359 * multiple pipes are active.
4360 */
4361 active->wm[0].enable = true;
4362 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4363 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4364 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4365 active->linetime = hw->wm_linetime[pipe];
4366 } else {
4367 int level, max_level = ilk_wm_max_level(dev);
4368
4369 /*
4370 * For inactive pipes, all watermark levels
4371 * should be marked as enabled but zeroed,
4372 * which is what we'd compute them to.
4373 */
4374 for (level = 0; level <= max_level; level++)
4375 active->wm[level].enable = true;
4376 }
4e0963c7
MR
4377
4378 intel_crtc->wm.active.ilk = *active;
243e6a44
VS
4379}
4380
6eb1a681
VS
4381#define _FW_WM(value, plane) \
4382 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4383#define _FW_WM_VLV(value, plane) \
4384 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
4385
4386static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4387 struct vlv_wm_values *wm)
4388{
4389 enum pipe pipe;
4390 uint32_t tmp;
4391
4392 for_each_pipe(dev_priv, pipe) {
4393 tmp = I915_READ(VLV_DDL(pipe));
4394
4395 wm->ddl[pipe].primary =
4396 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4397 wm->ddl[pipe].cursor =
4398 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4399 wm->ddl[pipe].sprite[0] =
4400 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4401 wm->ddl[pipe].sprite[1] =
4402 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4403 }
4404
4405 tmp = I915_READ(DSPFW1);
4406 wm->sr.plane = _FW_WM(tmp, SR);
4407 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
4408 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
4409 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
4410
4411 tmp = I915_READ(DSPFW2);
4412 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
4413 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
4414 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
4415
4416 tmp = I915_READ(DSPFW3);
4417 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4418
4419 if (IS_CHERRYVIEW(dev_priv)) {
4420 tmp = I915_READ(DSPFW7_CHV);
4421 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4422 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4423
4424 tmp = I915_READ(DSPFW8_CHV);
4425 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
4426 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
4427
4428 tmp = I915_READ(DSPFW9_CHV);
4429 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
4430 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
4431
4432 tmp = I915_READ(DSPHOWM);
4433 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4434 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4435 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4436 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
4437 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4438 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4439 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4440 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4441 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4442 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4443 } else {
4444 tmp = I915_READ(DSPFW7);
4445 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4446 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4447
4448 tmp = I915_READ(DSPHOWM);
4449 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4450 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4451 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4452 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4453 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4454 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4455 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4456 }
4457}
4458
4459#undef _FW_WM
4460#undef _FW_WM_VLV
4461
4462void vlv_wm_get_hw_state(struct drm_device *dev)
4463{
4464 struct drm_i915_private *dev_priv = to_i915(dev);
4465 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4466 struct intel_plane *plane;
4467 enum pipe pipe;
4468 u32 val;
4469
4470 vlv_read_wm_values(dev_priv, wm);
4471
4472 for_each_intel_plane(dev, plane) {
4473 switch (plane->base.type) {
4474 int sprite;
4475 case DRM_PLANE_TYPE_CURSOR:
4476 plane->wm.fifo_size = 63;
4477 break;
4478 case DRM_PLANE_TYPE_PRIMARY:
4479 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
4480 break;
4481 case DRM_PLANE_TYPE_OVERLAY:
4482 sprite = plane->plane;
4483 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
4484 break;
4485 }
4486 }
4487
4488 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4489 wm->level = VLV_WM_LEVEL_PM2;
4490
4491 if (IS_CHERRYVIEW(dev_priv)) {
4492 mutex_lock(&dev_priv->rps.hw_lock);
4493
4494 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4495 if (val & DSP_MAXFIFO_PM5_ENABLE)
4496 wm->level = VLV_WM_LEVEL_PM5;
4497
58590c14
VS
4498 /*
4499 * If DDR DVFS is disabled in the BIOS, Punit
4500 * will never ack the request. So if that happens
4501 * assume we don't have to enable/disable DDR DVFS
4502 * dynamically. To test that just set the REQ_ACK
4503 * bit to poke the Punit, but don't change the
4504 * HIGH/LOW bits so that we don't actually change
4505 * the current state.
4506 */
6eb1a681 4507 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
58590c14
VS
4508 val |= FORCE_DDR_FREQ_REQ_ACK;
4509 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4510
4511 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4512 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4513 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4514 "assuming DDR DVFS is disabled\n");
4515 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4516 } else {
4517 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4518 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4519 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4520 }
6eb1a681
VS
4521
4522 mutex_unlock(&dev_priv->rps.hw_lock);
4523 }
4524
4525 for_each_pipe(dev_priv, pipe)
4526 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4527 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4528 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4529
4530 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4531 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4532}
4533
243e6a44
VS
4534void ilk_wm_get_hw_state(struct drm_device *dev)
4535{
fac5e23e 4536 struct drm_i915_private *dev_priv = to_i915(dev);
820c1980 4537 struct ilk_wm_values *hw = &dev_priv->wm.hw;
243e6a44
VS
4538 struct drm_crtc *crtc;
4539
70e1e0ec 4540 for_each_crtc(dev, crtc)
243e6a44
VS
4541 ilk_pipe_wm_get_hw_state(crtc);
4542
4543 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4544 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4545 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4546
4547 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
cfa7698b
VS
4548 if (INTEL_INFO(dev)->gen >= 7) {
4549 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4550 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4551 }
243e6a44 4552
a42a5719 4553 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ac9545fd
VS
4554 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4555 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4556 else if (IS_IVYBRIDGE(dev))
4557 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4558 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
243e6a44
VS
4559
4560 hw->enable_fbc_wm =
4561 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4562}
4563
b445e3b0
ED
4564/**
4565 * intel_update_watermarks - update FIFO watermark values based on current modes
4566 *
4567 * Calculate watermark values for the various WM regs based on current mode
4568 * and plane configuration.
4569 *
4570 * There are several cases to deal with here:
4571 * - normal (i.e. non-self-refresh)
4572 * - self-refresh (SR) mode
4573 * - lines are large relative to FIFO size (buffer can hold up to 2)
4574 * - lines are small relative to FIFO size (buffer can hold more than 2
4575 * lines), so need to account for TLB latency
4576 *
4577 * The normal calculation is:
4578 * watermark = dotclock * bytes per pixel * latency
4579 * where latency is platform & configuration dependent (we assume pessimal
4580 * values here).
4581 *
4582 * The SR calculation is:
4583 * watermark = (trunc(latency/line time)+1) * surface width *
4584 * bytes per pixel
4585 * where
4586 * line time = htotal / dotclock
4587 * surface width = hdisplay for normal plane and 64 for cursor
4588 * and latency is assumed to be high, as above.
4589 *
4590 * The final value programmed to the register should always be rounded up,
4591 * and include an extra 2 entries to account for clock crossings.
4592 *
4593 * We don't use the sprite, so we can ignore that. And on Crestline we have
4594 * to set the non-SR watermarks to 8.
4595 */
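/*
 * Worked example for the normal formula above, with illustrative
 * numbers only: dotclock = 148500 kHz, 4 bytes per pixel and 12us of
 * latency give 148500000 * 4 * 0.000012 = 7128 bytes of FIFO.
 */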
46ba614c 4596void intel_update_watermarks(struct drm_crtc *crtc)
b445e3b0 4597{
fac5e23e 4598 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
b445e3b0
ED
4599
4600 if (dev_priv->display.update_wm)
46ba614c 4601 dev_priv->display.update_wm(crtc);
b445e3b0
ED
4602}
4603
e2828914 4604/*
9270388e 4605 * Lock protecting IPS related data structures
9270388e
DV
4606 */
4607DEFINE_SPINLOCK(mchdev_lock);
4608
4609/* Global for IPS driver to get at the current i915 device. Protected by
4610 * mchdev_lock. */
4611static struct drm_i915_private *i915_mch_dev;
4612
91d14251 4613bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
2b4e57bd 4614{
2b4e57bd
ED
4615 u16 rgvswctl;
4616
9270388e
DV
4617 assert_spin_locked(&mchdev_lock);
4618
2b4e57bd
ED
4619 rgvswctl = I915_READ16(MEMSWCTL);
4620 if (rgvswctl & MEMCTL_CMD_STS) {
4621 DRM_DEBUG("gpu busy, RCS change rejected\n");
4622 return false; /* still busy with another command */
4623 }
4624
4625 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4626 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4627 I915_WRITE16(MEMSWCTL, rgvswctl);
4628 POSTING_READ16(MEMSWCTL);
4629
4630 rgvswctl |= MEMCTL_CMD_STS;
4631 I915_WRITE16(MEMSWCTL, rgvswctl);
4632
4633 return true;
4634}
4635
91d14251 4636static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
2b4e57bd 4637{
84f1b20f 4638 u32 rgvmodectl;
2b4e57bd
ED
4639 u8 fmax, fmin, fstart, vstart;
4640
9270388e
DV
4641 spin_lock_irq(&mchdev_lock);
4642
84f1b20f
TU
4643 rgvmodectl = I915_READ(MEMMODECTL);
4644
2b4e57bd
ED
4645 /* Enable temp reporting */
4646 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4647 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4648
4649 /* 100ms RC evaluation intervals */
4650 I915_WRITE(RCUPEI, 100000);
4651 I915_WRITE(RCDNEI, 100000);
4652
4653 /* Set max/min thresholds to 90ms and 80ms respectively */
4654 I915_WRITE(RCBMAXAVG, 90000);
4655 I915_WRITE(RCBMINAVG, 80000);
4656
4657 I915_WRITE(MEMIHYST, 1);
4658
4659 /* Set up min, max, and cur for interrupt handling */
4660 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4661 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4662 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4663 MEMMODE_FSTART_SHIFT;
4664
616847e7 4665 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
2b4e57bd
ED
4666 PXVFREQ_PX_SHIFT;
4667
20e4d407
DV
4668 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4669 dev_priv->ips.fstart = fstart;
2b4e57bd 4670
20e4d407
DV
4671 dev_priv->ips.max_delay = fstart;
4672 dev_priv->ips.min_delay = fmin;
4673 dev_priv->ips.cur_delay = fstart;
2b4e57bd
ED
4674
4675 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4676 fmax, fmin, fstart);
4677
4678 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4679
4680 /*
4681 * Interrupts will be enabled in ironlake_irq_postinstall
4682 */
4683
4684 I915_WRITE(VIDSTART, vstart);
4685 POSTING_READ(VIDSTART);
4686
4687 rgvmodectl |= MEMMODE_SWMODE_EN;
4688 I915_WRITE(MEMMODECTL, rgvmodectl);
4689
9270388e 4690 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2b4e57bd 4691 DRM_ERROR("stuck trying to change perf mode\n");
dd92d8de 4692 mdelay(1);
2b4e57bd 4693
91d14251 4694 ironlake_set_drps(dev_priv, fstart);
2b4e57bd 4695
7d81c3e0
VS
4696 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4697 I915_READ(DDREC) + I915_READ(CSIEC);
20e4d407 4698 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
7d81c3e0 4699 dev_priv->ips.last_count2 = I915_READ(GFXEC);
5ed0bdf2 4700 dev_priv->ips.last_time2 = ktime_get_raw_ns();
9270388e
DV
4701
4702 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
4703}
4704
91d14251 4705static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
2b4e57bd 4706{
9270388e
DV
4707 u16 rgvswctl;
4708
4709 spin_lock_irq(&mchdev_lock);
4710
4711 rgvswctl = I915_READ16(MEMSWCTL);
2b4e57bd
ED
4712
4713 /* Ack interrupts, disable EFC interrupt */
4714 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4715 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4716 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4717 I915_WRITE(DEIIR, DE_PCU_EVENT);
4718 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4719
4720 /* Go back to the starting frequency */
91d14251 4721 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
dd92d8de 4722 mdelay(1);
2b4e57bd
ED
4723 rgvswctl |= MEMCTL_CMD_STS;
4724 I915_WRITE(MEMSWCTL, rgvswctl);
dd92d8de 4725 mdelay(1);
2b4e57bd 4726
9270388e 4727 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
4728}
4729
acbe9475
DV
4730/* There's a funny hw issue where the hw returns all 0 when reading from
4731 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4732 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4733 * all limits and the gpu stuck at whatever frequency it is at atm).
4734 */
74ef1173 4735static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2b4e57bd 4736{
7b9e0ae6 4737 u32 limits;
2b4e57bd 4738
20b46e59
DV
4739 /* Only set the down limit when we've reached the lowest level to avoid
4740 * getting more interrupts, otherwise leave this clear. This prevents a
4741 * race in the hw when coming out of rc6: There's a tiny window where
4742 * the hw runs at the minimal clock before selecting the desired
4743 * frequency, if the down threshold expires in that window we will not
4744 * receive a down interrupt. */
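 /* The register packs both soft limits into one word; e.g. on gen9
  * (illustrative values) max = 0x16 and min = 0x7 are encoded as
  * (0x16 << 23) | (0x7 << 14) once val has bottomed out. */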
2d1fe073 4745 if (IS_GEN9(dev_priv)) {
74ef1173
AG
4746 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4747 if (val <= dev_priv->rps.min_freq_softlimit)
4748 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4749 } else {
4750 limits = dev_priv->rps.max_freq_softlimit << 24;
4751 if (val <= dev_priv->rps.min_freq_softlimit)
4752 limits |= dev_priv->rps.min_freq_softlimit << 16;
4753 }
20b46e59
DV
4754
4755 return limits;
4756}
4757
dd75fdc8
CW
4758static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4759{
4760 int new_power;
8a586437
AG
4761 u32 threshold_up = 0, threshold_down = 0; /* in % */
4762 u32 ei_up = 0, ei_down = 0;
dd75fdc8
CW
4763
4764 new_power = dev_priv->rps.power;
4765 switch (dev_priv->rps.power) {
4766 case LOW_POWER:
a72b5623
CW
4767 if (val > dev_priv->rps.efficient_freq + 1 &&
4768 val > dev_priv->rps.cur_freq)
dd75fdc8
CW
4769 new_power = BETWEEN;
4770 break;
4771
4772 case BETWEEN:
a72b5623
CW
4773 if (val <= dev_priv->rps.efficient_freq &&
4774 val < dev_priv->rps.cur_freq)
dd75fdc8 4775 new_power = LOW_POWER;
a72b5623
CW
4776 else if (val >= dev_priv->rps.rp0_freq &&
4777 val > dev_priv->rps.cur_freq)
dd75fdc8
CW
4778 new_power = HIGH_POWER;
4779 break;
4780
4781 case HIGH_POWER:
a72b5623
CW
4782 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
4783 val < dev_priv->rps.cur_freq)
dd75fdc8
CW
4784 new_power = BETWEEN;
4785 break;
4786 }
4787 /* Max/min bins are special */
aed242ff 4788 if (val <= dev_priv->rps.min_freq_softlimit)
dd75fdc8 4789 new_power = LOW_POWER;
aed242ff 4790 if (val >= dev_priv->rps.max_freq_softlimit)
dd75fdc8
CW
4791 new_power = HIGH_POWER;
4792 if (new_power == dev_priv->rps.power)
4793 return;
4794
4795 /* Note the units here are not exactly 1us, but 1280ns. */
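 /*
  * E.g. in the LOW_POWER case below, ei_up = 16000us at a 95%
  * threshold programs an up-threshold of 16000 * 95 / 100 = 15200us
  * worth of busy time per evaluation interval.
  */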
4796 switch (new_power) {
4797 case LOW_POWER:
4798 /* Upclock if more than 95% busy over 16ms */
8a586437
AG
4799 ei_up = 16000;
4800 threshold_up = 95;
dd75fdc8
CW
4801
4802 /* Downclock if less than 85% busy over 32ms */
8a586437
AG
4803 ei_down = 32000;
4804 threshold_down = 85;
dd75fdc8
CW
4805 break;
4806
4807 case BETWEEN:
4808 /* Upclock if more than 90% busy over 13ms */
8a586437
AG
4809 ei_up = 13000;
4810 threshold_up = 90;
dd75fdc8
CW
4811
4812 /* Downclock if less than 75% busy over 32ms */
8a586437
AG
4813 ei_down = 32000;
4814 threshold_down = 75;
dd75fdc8
CW
4815 break;
4816
4817 case HIGH_POWER:
4818 /* Upclock if more than 85% busy over 10ms */
8a586437
AG
4819 ei_up = 10000;
4820 threshold_up = 85;
dd75fdc8
CW
4821
4822 /* Downclock if less than 60% busy over 32ms */
8a586437
AG
4823 ei_down = 32000;
4824 threshold_down = 60;
dd75fdc8
CW
4825 break;
4826 }
4827
8a586437 4828 I915_WRITE(GEN6_RP_UP_EI,
a72b5623 4829 GT_INTERVAL_FROM_US(dev_priv, ei_up));
8a586437 4830 I915_WRITE(GEN6_RP_UP_THRESHOLD,
a72b5623
CW
4831 GT_INTERVAL_FROM_US(dev_priv,
4832 ei_up * threshold_up / 100));
8a586437
AG
4833
4834 I915_WRITE(GEN6_RP_DOWN_EI,
a72b5623 4835 GT_INTERVAL_FROM_US(dev_priv, ei_down));
8a586437 4836 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
a72b5623
CW
4837 GT_INTERVAL_FROM_US(dev_priv,
4838 ei_down * threshold_down / 100));
4839
4840 I915_WRITE(GEN6_RP_CONTROL,
4841 GEN6_RP_MEDIA_TURBO |
4842 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4843 GEN6_RP_MEDIA_IS_GFX |
4844 GEN6_RP_ENABLE |
4845 GEN6_RP_UP_BUSY_AVG |
4846 GEN6_RP_DOWN_IDLE_AVG);
8a586437 4847
dd75fdc8 4848 dev_priv->rps.power = new_power;
8fb55197
CW
4849 dev_priv->rps.up_threshold = threshold_up;
4850 dev_priv->rps.down_threshold = threshold_down;
dd75fdc8
CW
4851 dev_priv->rps.last_adj = 0;
4852}
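/*
 * Worked example (added for clarity; assumes the 1.28us tick noted
 * above, which GT_INTERVAL_FROM_US() abstracts per platform): in
 * LOW_POWER mode ei_up = 16000 and threshold_up = 95, so the
 * up-threshold register is programmed to
 *
 *	GT_INTERVAL_FROM_US(16000 * 95 / 100) -> 15200us / 1.28us = 11875
 *
 * intervals -- the "more than 95% busy over 16ms" of the comment.
 */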
4853
2876ce73
CW
4854static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4855{
4856 u32 mask = 0;
4857
4858 if (val > dev_priv->rps.min_freq_softlimit)
6f4b12f8 4859 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
2876ce73 4860 if (val < dev_priv->rps.max_freq_softlimit)
6f4b12f8 4861 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
2876ce73 4862
7b3c29f6
CW
4863 mask &= dev_priv->pm_rps_events;
4864
59d02a1f 4865 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
2876ce73
CW
4866}
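/*
 * Added note: the value returned here is written to GEN6_PMINTRMSK,
 * where a set bit masks the interrupt. At val == max_freq_softlimit no
 * UP events are left unmasked and at val == min_freq_softlimit no DOWN
 * events are, so the GPU stops interrupting us once it cannot move
 * further in that direction.
 */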
4867
b8a5ff8d
JM
4868/* gen6_set_rps is called to update the frequency request, but should also be
4869 * called when the range (min_delay and max_delay) is modified so that we can
4870 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
dc97997a 4871static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
20b46e59 4872{
23eafea6 4873 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
dc97997a 4874 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
23eafea6
SAK
4875 return;
4876
4fc688ce 4877 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
aed242ff
CW
4878 WARN_ON(val > dev_priv->rps.max_freq);
4879 WARN_ON(val < dev_priv->rps.min_freq);
004777cb 4880
eb64cad1
CW
4881 /* min/max delay may still have been modified, so be sure to
4882 * write the limits value.
4883 */
4884 if (val != dev_priv->rps.cur_freq) {
4885 gen6_set_rps_thresholds(dev_priv, val);
b8a5ff8d 4886
dc97997a 4887 if (IS_GEN9(dev_priv))
5704195c
AG
4888 I915_WRITE(GEN6_RPNSWREQ,
4889 GEN9_FREQUENCY(val));
dc97997a 4890 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
eb64cad1
CW
4891 I915_WRITE(GEN6_RPNSWREQ,
4892 HSW_FREQUENCY(val));
4893 else
4894 I915_WRITE(GEN6_RPNSWREQ,
4895 GEN6_FREQUENCY(val) |
4896 GEN6_OFFSET(0) |
4897 GEN6_AGGRESSIVE_TURBO);
b8a5ff8d 4898 }
7b9e0ae6 4899
7b9e0ae6
CW
4900 /* Make sure we continue to get interrupts
4901 * until we hit the minimum or maximum frequencies.
4902 */
74ef1173 4903 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
2876ce73 4904 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
7b9e0ae6 4905
d5570a72
BW
4906 POSTING_READ(GEN6_RPNSWREQ);
4907
b39fb297 4908 dev_priv->rps.cur_freq = val;
0f94592e 4909 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
2b4e57bd
ED
4910}
4911
dc97997a 4912static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
ffe02b40 4913{
ffe02b40 4914 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
aed242ff
CW
4915 WARN_ON(val > dev_priv->rps.max_freq);
4916 WARN_ON(val < dev_priv->rps.min_freq);
ffe02b40 4917
dc97997a 4918 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
ffe02b40
VS
4919 "Odd GPU freq value\n"))
4920 val &= ~1;
4921
cd25dd5b
D
4922 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4923
8fb55197 4924 if (val != dev_priv->rps.cur_freq) {
ffe02b40 4925 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
8fb55197
CW
4926 if (!IS_CHERRYVIEW(dev_priv))
4927 gen6_set_rps_thresholds(dev_priv, val);
4928 }
ffe02b40 4929
ffe02b40
VS
4930 dev_priv->rps.cur_freq = val;
4931 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4932}
4933
a7f6e231 4934/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
76c3552f
D
4935 *
4936 * If Gfx is Idle, then
a7f6e231
D
4937 * 1. Forcewake Media well.
4938 * 2. Request idle freq.
4939 * 3. Release Forcewake of Media well.
76c3552f
D
4940*/
4941static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4942{
aed242ff 4943 u32 val = dev_priv->rps.idle_freq;
5549d25f 4944
aed242ff 4945 if (dev_priv->rps.cur_freq <= val)
76c3552f
D
4946 return;
4947
a7f6e231
D
4948 /* Wake up the media well, as that takes a lot less
4949 * power than the Render well. */
4950 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
dc97997a 4951 valleyview_set_rps(dev_priv, val);
a7f6e231 4952 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
76c3552f
D
4953}
4954
43cf3bf0
CW
4955void gen6_rps_busy(struct drm_i915_private *dev_priv)
4956{
4957 mutex_lock(&dev_priv->rps.hw_lock);
4958 if (dev_priv->rps.enabled) {
4959 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4960 gen6_rps_reset_ei(dev_priv);
4961 I915_WRITE(GEN6_PMINTRMSK,
4962 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
2b83c4c4 4963
c33d247d
CW
4964 gen6_enable_rps_interrupts(dev_priv);
4965
2b83c4c4
MW
4966 /* Ensure we start at the user's desired frequency */
4967 intel_set_rps(dev_priv,
4968 clamp(dev_priv->rps.cur_freq,
4969 dev_priv->rps.min_freq_softlimit,
4970 dev_priv->rps.max_freq_softlimit));
43cf3bf0
CW
4971 }
4972 mutex_unlock(&dev_priv->rps.hw_lock);
4973}
4974
b29c19b6
CW
4975void gen6_rps_idle(struct drm_i915_private *dev_priv)
4976{
c33d247d
CW
4977 /* Flush our bottom-half so that it does not race with us
4978 * setting the idle frequency and so that it is bounded by
4979 * our rpm wakeref. And then disable the interrupts to stop any
4980 * further RPS reclocking whilst we are asleep.
4981 */
4982 gen6_disable_rps_interrupts(dev_priv);
4983
b29c19b6 4984 mutex_lock(&dev_priv->rps.hw_lock);
c0951f0c 4985 if (dev_priv->rps.enabled) {
dc97997a 4986 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
76c3552f 4987 vlv_set_rps_idle(dev_priv);
7526ed79 4988 else
dc97997a 4989 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
c0951f0c 4990 dev_priv->rps.last_adj = 0;
12c100bf
VS
4991 I915_WRITE(GEN6_PMINTRMSK,
4992 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
c0951f0c 4993 }
8d3afd7d 4994 mutex_unlock(&dev_priv->rps.hw_lock);
1854d5ca 4995
8d3afd7d 4996 spin_lock(&dev_priv->rps.client_lock);
1854d5ca
CW
4997 while (!list_empty(&dev_priv->rps.clients))
4998 list_del_init(dev_priv->rps.clients.next);
8d3afd7d 4999 spin_unlock(&dev_priv->rps.client_lock);
b29c19b6
CW
5000}
5001
1854d5ca 5002void gen6_rps_boost(struct drm_i915_private *dev_priv,
e61b9958
CW
5003 struct intel_rps_client *rps,
5004 unsigned long submitted)
b29c19b6 5005{
8d3afd7d
CW
5006 /* This is intentionally racy! We peek at the state here, then
5007 * validate inside the RPS worker.
5008 */
67d97da3 5009 if (!(dev_priv->gt.awake &&
8d3afd7d 5010 dev_priv->rps.enabled &&
29ecd78d 5011 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
8d3afd7d 5012 return;
43cf3bf0 5013
e61b9958
CW
5014 /* Force an RPS boost (and don't count it against the client) if
5015 * the GPU is severely congested.
5016 */
d0bc54f2 5017 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
e61b9958
CW
5018 rps = NULL;
5019
8d3afd7d
CW
5020 spin_lock(&dev_priv->rps.client_lock);
5021 if (rps == NULL || list_empty(&rps->link)) {
5022 spin_lock_irq(&dev_priv->irq_lock);
5023 if (dev_priv->rps.interrupts_enabled) {
5024 dev_priv->rps.client_boost = true;
c33d247d 5025 schedule_work(&dev_priv->rps.work);
8d3afd7d
CW
5026 }
5027 spin_unlock_irq(&dev_priv->irq_lock);
1854d5ca 5028
2e1b8730
CW
5029 if (rps != NULL) {
5030 list_add(&rps->link, &dev_priv->rps.clients);
5031 rps->boosts++;
1854d5ca
CW
5032 } else
5033 dev_priv->rps.boosts++;
c0951f0c 5034 }
8d3afd7d 5035 spin_unlock(&dev_priv->rps.client_lock);
b29c19b6
CW
5036}
5037
dc97997a 5038void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
0a073b84 5039{
dc97997a
CW
5040 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5041 valleyview_set_rps(dev_priv, val);
ffe02b40 5042 else
dc97997a 5043 gen6_set_rps(dev_priv, val);
0a073b84
JB
5044}
5045
dc97997a 5046static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
20e49366 5047{
20e49366 5048 I915_WRITE(GEN6_RC_CONTROL, 0);
38c23527 5049 I915_WRITE(GEN9_PG_ENABLE, 0);
20e49366
ZW
5050}
5051
dc97997a 5052static void gen9_disable_rps(struct drm_i915_private *dev_priv)
2030d684 5053{
2030d684
AG
5054 I915_WRITE(GEN6_RP_CONTROL, 0);
5055}
5056
dc97997a 5057static void gen6_disable_rps(struct drm_i915_private *dev_priv)
d20d4f0c 5058{
d20d4f0c 5059 I915_WRITE(GEN6_RC_CONTROL, 0);
44fc7d5c 5060 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2030d684 5061 I915_WRITE(GEN6_RP_CONTROL, 0);
44fc7d5c
DV
5062}
5063
dc97997a 5064static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
38807746 5065{
38807746
D
5066 I915_WRITE(GEN6_RC_CONTROL, 0);
5067}
5068
dc97997a 5069static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
44fc7d5c 5070{
98a2e5f9
D
5071 /* We're doing forcewake before disabling RC6;
5072 * this is what the BIOS expects when going into suspend */
59bad947 5073 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
98a2e5f9 5074
44fc7d5c 5075 I915_WRITE(GEN6_RC_CONTROL, 0);
d20d4f0c 5076
59bad947 5077 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d20d4f0c
JB
5078}
5079
dc97997a 5080static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
dc39fff7 5081{
dc97997a 5082 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
91ca689a
ID
5083 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
5084 mode = GEN6_RC_CTL_RC6_ENABLE;
5085 else
5086 mode = 0;
5087 }
dc97997a 5088 if (HAS_RC6p(dev_priv))
b99d49cc
ID
5089 DRM_DEBUG_DRIVER("Enabling RC6 states: "
5090 "RC6 %s RC6p %s RC6pp %s\n",
5091 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
5092 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
5093 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
58abf1da
RV
5094
5095 else
b99d49cc
ID
5096 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
5097 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
dc39fff7
BW
5098}
5099
dc97997a 5100static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
274008e8 5101{
72e96d64 5102 struct i915_ggtt *ggtt = &dev_priv->ggtt;
274008e8
SAK
5103 bool enable_rc6 = true;
5104 unsigned long rc6_ctx_base;
fc619841
ID
5105 u32 rc_ctl;
5106 int rc_sw_target;
5107
5108 rc_ctl = I915_READ(GEN6_RC_CONTROL);
5109 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5110 RC_SW_TARGET_STATE_SHIFT;
5111 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5112 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5113 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5114 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5115 rc_sw_target);
274008e8
SAK
5116
5117 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
b99d49cc 5118 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
274008e8
SAK
5119 enable_rc6 = false;
5120 }
5121
5122 /*
5123 * The exact context size is not known for BXT, so assume a page size
5124 * for this check.
5125 */
5126 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
72e96d64
JL
5127 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
5128 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
5129 ggtt->stolen_reserved_size))) {
b99d49cc 5130 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
274008e8
SAK
5131 enable_rc6 = false;
5132 }
5133
5134 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
5135 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
5136 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
5137 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
b99d49cc 5138 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
274008e8
SAK
5139 enable_rc6 = false;
5140 }
5141
fc619841
ID
5142 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5143 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5144 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5145 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5146 enable_rc6 = false;
5147 }
5148
5149 if (!I915_READ(GEN6_GFXPAUSE)) {
5150 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
5151 enable_rc6 = false;
5152 }
5153
5154 if (!I915_READ(GEN8_MISC_CTRL0)) {
5155 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
274008e8
SAK
5156 enable_rc6 = false;
5157 }
5158
5159 return enable_rc6;
5160}
5161
dc97997a 5162int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
2b4e57bd 5163{
e7d66d89 5164 /* No RC6 before Ironlake and code is gone for ilk. */
dc97997a 5165 if (INTEL_INFO(dev_priv)->gen < 6)
e6069ca8
ID
5166 return 0;
5167
274008e8
SAK
5168 if (!enable_rc6)
5169 return 0;
5170
dc97997a 5171 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
274008e8
SAK
5172 DRM_INFO("RC6 disabled by BIOS\n");
5173 return 0;
5174 }
5175
456470eb 5176 /* Respect the kernel parameter if it is set */
e6069ca8
ID
5177 if (enable_rc6 >= 0) {
5178 int mask;
5179
dc97997a 5180 if (HAS_RC6p(dev_priv))
e6069ca8
ID
5181 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
5182 INTEL_RC6pp_ENABLE;
5183 else
5184 mask = INTEL_RC6_ENABLE;
5185
5186 if ((enable_rc6 & mask) != enable_rc6)
b99d49cc
ID
5187 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5188 "(requested %d, valid %d)\n",
5189 enable_rc6 & mask, enable_rc6, mask);
e6069ca8
ID
5190
5191 return enable_rc6 & mask;
5192 }
2b4e57bd 5193
dc97997a 5194 if (IS_IVYBRIDGE(dev_priv))
cca84a1f 5195 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8bade1ad
BW
5196
5197 return INTEL_RC6_ENABLE;
2b4e57bd
ED
5198}
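/*
 * Worked example (added): on a part with RC6p the valid mask is
 * INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE, so a user
 * passing enable_rc6=3 gets 3 back unchanged; on a part without RC6p
 * the mask is just INTEL_RC6_ENABLE, the same request is trimmed to 1,
 * and the adjustment is noted in the debug log.
 */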
5199
dc97997a 5200static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
3280e8b0
BW
5201{
5202 /* All of these values are in units of 50MHz */
773ea9a8 5203
93ee2920 5204 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
dc97997a 5205 if (IS_BROXTON(dev_priv)) {
773ea9a8 5206 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
35040562
BP
5207 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
5208 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5209 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
5210 } else {
773ea9a8 5211 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
35040562
BP
5212 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
5213 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5214 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
5215 }
3280e8b0 5216 /* hw_max = RP0 until we check for overclocking */
773ea9a8 5217 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3280e8b0 5218
93ee2920 5219 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
dc97997a
CW
5220 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
5221 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
773ea9a8
CW
5222 u32 ddcc_status = 0;
5223
5224 if (sandybridge_pcode_read(dev_priv,
5225 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
5226 &ddcc_status) == 0)
93ee2920 5227 dev_priv->rps.efficient_freq =
46efa4ab
TR
5228 clamp_t(u8,
5229 ((ddcc_status >> 8) & 0xff),
5230 dev_priv->rps.min_freq,
5231 dev_priv->rps.max_freq);
93ee2920
TR
5232 }
5233
dc97997a 5234 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
c5e0688c 5235 /* Store the frequency values in 16.66 MHz units, which is
773ea9a8
CW
5236 * the natural hardware unit for SKL
5237 */
c5e0688c
AG
5238 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
5239 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
5240 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
5241 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
5242 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
5243 }
3280e8b0
BW
5244}
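/*
 * Worked example (added; the register value is invented): a non-BXT
 * part reporting rp_state_cap == 0x070b24 decodes to rp0 = 0x24 (36),
 * rp1 = 0x0b (11) and min = 0x07 (7), i.e. 1800/550/350 MHz in 50MHz
 * units. On SKL/KBL these are then rescaled by GEN9_FREQ_SCALER, so
 * rp0 becomes 108 in 16.66MHz units -- still 1800 MHz.
 */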
5245
3a45b05c
CW
5246static void reset_rps(struct drm_i915_private *dev_priv,
5247 void (*set)(struct drm_i915_private *, u8))
5248{
5249 u8 freq = dev_priv->rps.cur_freq;
5250
5251 /* force a reset */
5252 dev_priv->rps.power = -1;
5253 dev_priv->rps.cur_freq = -1;
5254
5255 set(dev_priv, freq);
5256}
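/*
 * Added note: parking -1 in rps.power and rps.cur_freq guarantees the
 * following set() call sees a mismatch against any valid frequency or
 * power bin and therefore reprograms the thresholds and the hardware
 * frequency request from scratch.
 */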
5257
b6fef0ef 5258/* See the Gen9_GT_PM_Programming_Guide doc for the below */
dc97997a 5259static void gen9_enable_rps(struct drm_i915_private *dev_priv)
b6fef0ef 5260{
b6fef0ef
JB
5261 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5262
23eafea6 5263 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
dc97997a 5264 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
2030d684
AG
5265 /*
5266 * The BIOS could leave HW Turbo enabled, in which case we need to
5267 * explicitly clear out the control register to avoid an
5268 * inconsistency with the debugfs interface, which would otherwise
5269 * report Turbo as enabled even though WaGsvDisableTurbo applies.
5270 * Apart from that there is no problem even if Turbo is left
5271 * enabled in the control register, as the Up/Down interrupts
5272 * would remain masked.
5273 */
dc97997a 5274 gen9_disable_rps(dev_priv);
23eafea6
SAK
5275 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5276 return;
5277 }
5278
0beb059a
AG
5279 /* Program defaults and thresholds for RPS */
5280 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5281 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
5282
5283 /* 1 second timeout */
5284 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5285 GT_INTERVAL_FROM_US(dev_priv, 1000000));
5286
b6fef0ef 5287 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
b6fef0ef 5288
0beb059a
AG
5289 /* Lean on the reset_rps() call below (via gen6_set_rps) to program
5290 * the Up/Down EI & threshold registers, as well as the RP_CONTROL,
5291 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
3a45b05c 5292 reset_rps(dev_priv, gen6_set_rps);
b6fef0ef
JB
5293
5294 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5295}
5296
dc97997a 5297static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
20e49366 5298{
e2f80391 5299 struct intel_engine_cs *engine;
20e49366 5300 uint32_t rc6_mask = 0;
20e49366
ZW
5301
5302 /* 1a: Software RC state - RC0 */
5303 I915_WRITE(GEN6_RC_STATE, 0);
5304
5305 /* 1b: Get forcewake during program sequence. Although the driver
5306 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
59bad947 5307 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
20e49366
ZW
5308
5309 /* 2a: Disable RC states. */
5310 I915_WRITE(GEN6_RC_CONTROL, 0);
5311
5312 /* 2b: Program RC6 thresholds.*/
63a4dec2
SAK
5313
5314 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
dc97997a 5315 if (IS_SKYLAKE(dev_priv))
63a4dec2
SAK
5316 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5317 else
5318 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
20e49366
ZW
5319 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5320 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
b4ac5afc 5321 for_each_engine(engine, dev_priv)
e2f80391 5322 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
97c322e7 5323
1a3d1898 5324 if (HAS_GUC(dev_priv))
97c322e7
SAK
5325 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5326
20e49366 5327 I915_WRITE(GEN6_RC_SLEEP, 0);
20e49366 5328
38c23527
ZW
5329 /* 2c: Program Coarse Power Gating Policies. */
5330 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5331 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5332
20e49366 5333 /* 3a: Enable RC6 */
dc97997a 5334 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
20e49366 5335 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
87ad3212 5336 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
3e7732a0 5337 /* WaRsUseTimeoutMode */
9fc736e8 5338 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
3e7732a0 5339 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
e3429cd2
SAK
5340 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5341 GEN7_RC_CTL_TO_MODE |
5342 rc6_mask);
3e7732a0
SAK
5343 } else {
5344 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
e3429cd2
SAK
5345 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5346 GEN6_RC_CTL_EI_MODE(1) |
5347 rc6_mask);
3e7732a0 5348 }
20e49366 5349
cb07bae0
SK
5350 /*
5351 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
f2d2fe95 5352 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
cb07bae0 5353 */
dc97997a 5354 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
f2d2fe95
SAK
5355 I915_WRITE(GEN9_PG_ENABLE, 0);
5356 else
5357 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5358 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
38c23527 5359
59bad947 5360 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
20e49366
ZW
5361}
5362
dc97997a 5363static void gen8_enable_rps(struct drm_i915_private *dev_priv)
6edee7f3 5364{
e2f80391 5365 struct intel_engine_cs *engine;
93ee2920 5366 uint32_t rc6_mask = 0;
6edee7f3
BW
5367
5368 /* 1a: Software RC state - RC0 */
5369 I915_WRITE(GEN6_RC_STATE, 0);
5370
5371 /* 1c & 1d: Get forcewake during program sequence. Although the driver
5372 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
59bad947 5373 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6edee7f3
BW
5374
5375 /* 2a: Disable RC states. */
5376 I915_WRITE(GEN6_RC_CONTROL, 0);
5377
6edee7f3
BW
5378 /* 2b: Program RC6 thresholds.*/
5379 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5380 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5381 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
b4ac5afc 5382 for_each_engine(engine, dev_priv)
e2f80391 5383 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6edee7f3 5384 I915_WRITE(GEN6_RC_SLEEP, 0);
dc97997a 5385 if (IS_BROADWELL(dev_priv))
0d68b25e
TR
5386 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5387 else
5388 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
6edee7f3
BW
5389
5390 /* 3: Enable RC6 */
dc97997a 5391 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6edee7f3 5392 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
dc97997a
CW
5393 intel_print_rc6_info(dev_priv, rc6_mask);
5394 if (IS_BROADWELL(dev_priv))
0d68b25e
TR
5395 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5396 GEN7_RC_CTL_TO_MODE |
5397 rc6_mask);
5398 else
5399 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5400 GEN6_RC_CTL_EI_MODE(1) |
5401 rc6_mask);
6edee7f3
BW
5402
5403 /* 4 Program defaults and thresholds for RPS*/
f9bdc585
BW
5404 I915_WRITE(GEN6_RPNSWREQ,
5405 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5406 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5407 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
7526ed79
DV
5408 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5409 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5410
5411 /* Docs recommend 900MHz, and 300 MHz respectively */
5412 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5413 dev_priv->rps.max_freq_softlimit << 24 |
5414 dev_priv->rps.min_freq_softlimit << 16);
5415
5416 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5417 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
5418 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5419 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
5420
5421 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6edee7f3
BW
5422
5423 /* 5: Enable RPS */
7526ed79
DV
5424 I915_WRITE(GEN6_RP_CONTROL,
5425 GEN6_RP_MEDIA_TURBO |
5426 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5427 GEN6_RP_MEDIA_IS_GFX |
5428 GEN6_RP_ENABLE |
5429 GEN6_RP_UP_BUSY_AVG |
5430 GEN6_RP_DOWN_IDLE_AVG);
5431
5432 /* 6: Ring frequency + overclocking (our driver does this later) */
5433
3a45b05c 5434 reset_rps(dev_priv, gen6_set_rps);
7526ed79 5435
59bad947 5436 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6edee7f3
BW
5437}
5438
dc97997a 5439static void gen6_enable_rps(struct drm_i915_private *dev_priv)
2b4e57bd 5440{
e2f80391 5441 struct intel_engine_cs *engine;
99ac9612 5442 u32 rc6vids, rc6_mask = 0;
2b4e57bd 5443 u32 gtfifodbg;
2b4e57bd 5444 int rc6_mode;
b4ac5afc 5445 int ret;
2b4e57bd 5446
4fc688ce 5447 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 5448
2b4e57bd
ED
5449 /* Here begins a magic sequence of register writes to enable
5450 * auto-downclocking.
5451 *
5452 * Perhaps there might be some value in exposing these to
5453 * userspace...
5454 */
5455 I915_WRITE(GEN6_RC_STATE, 0);
2b4e57bd
ED
5456
5457 /* Clear the DBG now so we don't confuse earlier errors */
297b32ec
VS
5458 gtfifodbg = I915_READ(GTFIFODBG);
5459 if (gtfifodbg) {
2b4e57bd
ED
5460 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
5461 I915_WRITE(GTFIFODBG, gtfifodbg);
5462 }
5463
59bad947 5464 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2b4e57bd
ED
5465
5466 /* disable the counters and set deterministic thresholds */
5467 I915_WRITE(GEN6_RC_CONTROL, 0);
5468
5469 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5470 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5471 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5472 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5473 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5474
b4ac5afc 5475 for_each_engine(engine, dev_priv)
e2f80391 5476 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
2b4e57bd
ED
5477
5478 I915_WRITE(GEN6_RC_SLEEP, 0);
5479 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
dc97997a 5480 if (IS_IVYBRIDGE(dev_priv))
351aa566
SM
5481 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5482 else
5483 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
0920a487 5484 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
2b4e57bd
ED
5485 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5486
5a7dc92a 5487 /* Check if we are enabling RC6 */
dc97997a 5488 rc6_mode = intel_enable_rc6();
2b4e57bd
ED
5489 if (rc6_mode & INTEL_RC6_ENABLE)
5490 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5491
5a7dc92a 5492 /* We don't use those on Haswell */
dc97997a 5493 if (!IS_HASWELL(dev_priv)) {
5a7dc92a
ED
5494 if (rc6_mode & INTEL_RC6p_ENABLE)
5495 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2b4e57bd 5496
5a7dc92a
ED
5497 if (rc6_mode & INTEL_RC6pp_ENABLE)
5498 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5499 }
2b4e57bd 5500
dc97997a 5501 intel_print_rc6_info(dev_priv, rc6_mask);
2b4e57bd
ED
5502
5503 I915_WRITE(GEN6_RC_CONTROL,
5504 rc6_mask |
5505 GEN6_RC_CTL_EI_MODE(1) |
5506 GEN6_RC_CTL_HW_ENABLE);
5507
dd75fdc8
CW
5508 /* Power down if completely idle for over 50ms */
5509 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
2b4e57bd 5510 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2b4e57bd 5511
42c0526c 5512 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
d060c169 5513 if (ret)
42c0526c 5514 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
d060c169 5515
3a45b05c 5516 reset_rps(dev_priv, gen6_set_rps);
2b4e57bd 5517
31643d54
BW
5518 rc6vids = 0;
5519 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
dc97997a 5520 if (IS_GEN6(dev_priv) && ret) {
31643d54 5521 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
dc97997a 5522 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
31643d54
BW
5523 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5524 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5525 rc6vids &= 0xffff00;
5526 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5527 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5528 if (ret)
5529 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5530 }
5531
59bad947 5532 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2b4e57bd
ED
5533}
5534
fb7404e8 5535static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2b4e57bd
ED
5536{
5537 int min_freq = 15;
3ebecd07
CW
5538 unsigned int gpu_freq;
5539 unsigned int max_ia_freq, min_ring_freq;
4c8c7743 5540 unsigned int max_gpu_freq, min_gpu_freq;
2b4e57bd 5541 int scaling_factor = 180;
eda79642 5542 struct cpufreq_policy *policy;
2b4e57bd 5543
4fc688ce 5544 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 5545
eda79642
BW
5546 policy = cpufreq_cpu_get(0);
5547 if (policy) {
5548 max_ia_freq = policy->cpuinfo.max_freq;
5549 cpufreq_cpu_put(policy);
5550 } else {
5551 /*
5552 * Default to measured freq if none found, PCU will ensure we
5553 * don't go over
5554 */
2b4e57bd 5555 max_ia_freq = tsc_khz;
eda79642 5556 }
2b4e57bd
ED
5557
5558 /* Convert from kHz to MHz */
5559 max_ia_freq /= 1000;
5560
153b4b95 5561 min_ring_freq = I915_READ(DCLK) & 0xf;
f6aca45c
BW
5562 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5563 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3ebecd07 5564
dc97997a 5565 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4c8c7743
AG
5566 /* Convert GT frequency to 50 MHz units */
5567 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5568 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5569 } else {
5570 min_gpu_freq = dev_priv->rps.min_freq;
5571 max_gpu_freq = dev_priv->rps.max_freq;
5572 }
5573
2b4e57bd
ED
5574 /*
5575 * For each potential GPU frequency, load a ring frequency we'd like
5576 * to use for memory access. We do this by specifying the IA frequency
5577 * the PCU should use as a reference to determine the ring frequency.
5578 */
4c8c7743
AG
5579 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5580 int diff = max_gpu_freq - gpu_freq;
3ebecd07
CW
5581 unsigned int ia_freq = 0, ring_freq = 0;
5582
dc97997a 5583 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4c8c7743
AG
5584 /*
5585 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5586 * No floor required for ring frequency on SKL.
5587 */
5588 ring_freq = gpu_freq;
dc97997a 5589 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
46c764d4
BW
5590 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5591 ring_freq = max(min_ring_freq, gpu_freq);
dc97997a 5592 } else if (IS_HASWELL(dev_priv)) {
f6aca45c 5593 ring_freq = mult_frac(gpu_freq, 5, 4);
3ebecd07
CW
5594 ring_freq = max(min_ring_freq, ring_freq);
5595 /* leave ia_freq as the default, chosen by cpufreq */
5596 } else {
5597 /* On older processors, there is no separate ring
5598 * clock domain, so in order to boost the bandwidth
5599 * of the ring, we need to upclock the CPU (ia_freq).
5600 *
5601 * For GPU frequencies less than 750MHz,
5602 * just use the lowest ring freq.
5603 */
5604 if (gpu_freq < min_freq)
5605 ia_freq = 800;
5606 else
5607 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5608 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5609 }
2b4e57bd 5610
42c0526c
BW
5611 sandybridge_pcode_write(dev_priv,
5612 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3ebecd07
CW
5613 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5614 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5615 gpu_freq);
2b4e57bd 5616 }
2b4e57bd
ED
5617}
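/*
 * Worked example (added; the ratios are invented): on a gen8 part with
 * min_ring_freq decoding to 8, gpu_freq = 12 (600 MHz in 50MHz units)
 * yields ring_freq = max(8, 12) = 12, i.e. 1200 MHz in the ring's
 * 100MHz units -- the "2 * GT" above. Haswell instead uses
 * 12 * 5 / 4 = 15, floored at min_ring_freq. On the legacy path with
 * max_ia_freq = 3400 and diff = 4, ia_freq = 3400 - (4 * 180) / 2 =
 * 3040 MHz, which DIV_ROUND_CLOSEST() turns into 30 for the pcode
 * request.
 */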
5618
03af2045 5619static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
2b6b3a09
D
5620{
5621 u32 val, rp0;
5622
5b5929cb 5623 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
2b6b3a09 5624
43b67998 5625 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
5b5929cb
JN
5626 case 8:
5627 /* (2 * 4) config */
5628 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5629 break;
5630 case 12:
5631 /* (2 * 6) config */
5632 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5633 break;
5634 case 16:
5635 /* (2 * 8) config */
5636 default:
5637 /* Setting (2 * 8) Min RP0 for any other combination */
5638 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5639 break;
095acd5f 5640 }
5b5929cb
JN
5641
5642 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5643
2b6b3a09
D
5644 return rp0;
5645}
5646
5647static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5648{
5649 u32 val, rpe;
5650
5651 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5652 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5653
5654 return rpe;
5655}
5656
7707df4a
D
5657static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5658{
5659 u32 val, rp1;
5660
5b5929cb
JN
5661 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5662 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5663
7707df4a
D
5664 return rp1;
5665}
5666
f8f2b001
D
5667static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5668{
5669 u32 val, rp1;
5670
5671 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5672
5673 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5674
5675 return rp1;
5676}
5677
03af2045 5678static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
0a073b84
JB
5679{
5680 u32 val, rp0;
5681
64936258 5682 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
0a073b84
JB
5683
5684 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5685 /* Clamp to max */
5686 rp0 = min_t(u32, rp0, 0xea);
5687
5688 return rp0;
5689}
5690
5691static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5692{
5693 u32 val, rpe;
5694
64936258 5695 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
0a073b84 5696 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
64936258 5697 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
0a073b84
JB
5698 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5699
5700 return rpe;
5701}
5702
03af2045 5703static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
0a073b84 5704{
36146035
ID
5705 u32 val;
5706
5707 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5708 /*
5709 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5710 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5711 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5712 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
5713 * to make sure it matches what Punit accepts.
5714 */
5715 return max_t(u32, val, 0xc0);
0a073b84
JB
5716}
5717
ae48434c
ID
5718/* Check that the pctx buffer wasn't moved under us. */
5719static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5720{
5721 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5722
5723 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5724 dev_priv->vlv_pctx->stolen->start);
5725}
5726
38807746
D
5727
5728/* Check that the pcbr address is not empty. */
5729static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5730{
5731 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5732
5733 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5734}
5735
dc97997a 5736static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
38807746 5737{
62106b4f 5738 struct i915_ggtt *ggtt = &dev_priv->ggtt;
72e96d64 5739 unsigned long pctx_paddr, paddr;
38807746
D
5740 u32 pcbr;
5741 int pctx_size = 32*1024;
5742
38807746
D
5743 pcbr = I915_READ(VLV_PCBR);
5744 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
ce611ef8 5745 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
38807746 5746 paddr = (dev_priv->mm.stolen_base +
62106b4f 5747 (ggtt->stolen_size - pctx_size));
38807746
D
5748
5749 pctx_paddr = (paddr & (~4095));
5750 I915_WRITE(VLV_PCBR, pctx_paddr);
5751 }
ce611ef8
VS
5752
5753 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
38807746
D
5754}
5755
dc97997a 5756static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
c9cddffc 5757{
c9cddffc
JB
5758 struct drm_i915_gem_object *pctx;
5759 unsigned long pctx_paddr;
5760 u32 pcbr;
5761 int pctx_size = 24*1024;
5762
5763 pcbr = I915_READ(VLV_PCBR);
5764 if (pcbr) {
5765 /* BIOS set it up already, grab the pre-alloc'd space */
5766 int pcbr_offset;
5767
5768 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
91c8a326 5769 pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
c9cddffc 5770 pcbr_offset,
190d6cd5 5771 I915_GTT_OFFSET_NONE,
c9cddffc
JB
5772 pctx_size);
5773 goto out;
5774 }
5775
ce611ef8
VS
5776 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5777
c9cddffc
JB
5778 /*
5779 * From the Gunit register HAS:
5780 * The Gfx driver is expected to program this register and ensure
5781 * proper allocation within Gfx stolen memory. For example, this
5782 * register should be programmed such that the PCBR range does not
5783 * overlap with other ranges, such as the frame buffer, protected
5784 * memory, or any other relevant ranges.
5785 */
91c8a326 5786 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
c9cddffc
JB
5787 if (!pctx) {
5788 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
ee504898 5789 goto out;
c9cddffc
JB
5790 }
5791
5792 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5793 I915_WRITE(VLV_PCBR, pctx_paddr);
5794
5795out:
ce611ef8 5796 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
c9cddffc
JB
5797 dev_priv->vlv_pctx = pctx;
5798}
5799
dc97997a 5800static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
ae48434c 5801{
ae48434c
ID
5802 if (WARN_ON(!dev_priv->vlv_pctx))
5803 return;
5804
34911fd3 5805 i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
ae48434c
ID
5806 dev_priv->vlv_pctx = NULL;
5807}
5808
c30fec65
VS
5809static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5810{
5811 dev_priv->rps.gpll_ref_freq =
5812 vlv_get_cck_clock(dev_priv, "GPLL ref",
5813 CCK_GPLL_CLOCK_CONTROL,
5814 dev_priv->czclk_freq);
5815
5816 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
5817 dev_priv->rps.gpll_ref_freq);
5818}
5819
dc97997a 5820static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
4e80519e 5821{
2bb25c17 5822 u32 val;
4e80519e 5823
dc97997a 5824 valleyview_setup_pctx(dev_priv);
4e80519e 5825
c30fec65
VS
5826 vlv_init_gpll_ref_freq(dev_priv);
5827
2bb25c17
VS
5828 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5829 switch ((val >> 6) & 3) {
5830 case 0:
5831 case 1:
5832 dev_priv->mem_freq = 800;
5833 break;
5834 case 2:
5835 dev_priv->mem_freq = 1066;
5836 break;
5837 case 3:
5838 dev_priv->mem_freq = 1333;
5839 break;
5840 }
80b83b62 5841 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
2bb25c17 5842
4e80519e
ID
5843 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5844 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5845 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7c59a9c1 5846 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4e80519e
ID
5847 dev_priv->rps.max_freq);
5848
5849 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5850 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7c59a9c1 5851 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4e80519e
ID
5852 dev_priv->rps.efficient_freq);
5853
f8f2b001
D
5854 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5855 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7c59a9c1 5856 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
f8f2b001
D
5857 dev_priv->rps.rp1_freq);
5858
4e80519e
ID
5859 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5860 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7c59a9c1 5861 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4e80519e 5862 dev_priv->rps.min_freq);
4e80519e
ID
5863}
5864
dc97997a 5865static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
38807746 5866{
2bb25c17 5867 u32 val;
2b6b3a09 5868
dc97997a 5869 cherryview_setup_pctx(dev_priv);
2b6b3a09 5870
c30fec65
VS
5871 vlv_init_gpll_ref_freq(dev_priv);
5872
a580516d 5873 mutex_lock(&dev_priv->sb_lock);
c6e8f39d 5874 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
a580516d 5875 mutex_unlock(&dev_priv->sb_lock);
c6e8f39d 5876
2bb25c17 5877 switch ((val >> 2) & 0x7) {
2bb25c17 5878 case 3:
2bb25c17
VS
5879 dev_priv->mem_freq = 2000;
5880 break;
bfa7df01 5881 default:
2bb25c17
VS
5882 dev_priv->mem_freq = 1600;
5883 break;
5884 }
80b83b62 5885 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
2bb25c17 5886
2b6b3a09
D
5887 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5888 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5889 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7c59a9c1 5890 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
2b6b3a09
D
5891 dev_priv->rps.max_freq);
5892
5893 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5894 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7c59a9c1 5895 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2b6b3a09
D
5896 dev_priv->rps.efficient_freq);
5897
7707df4a
D
5898 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5899 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7c59a9c1 5900 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7707df4a
D
5901 dev_priv->rps.rp1_freq);
5902
5b7c91b7
D
5903 /* PUnit validated range is only [RPe, RP0] */
5904 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
2b6b3a09 5905 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7c59a9c1 5906 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2b6b3a09
D
5907 dev_priv->rps.min_freq);
5908
1c14762d
VS
5909 WARN_ONCE((dev_priv->rps.max_freq |
5910 dev_priv->rps.efficient_freq |
5911 dev_priv->rps.rp1_freq |
5912 dev_priv->rps.min_freq) & 1,
5913 "Odd GPU freq values\n");
38807746
D
5914}
5915
dc97997a 5916static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
4e80519e 5917{
dc97997a 5918 valleyview_cleanup_pctx(dev_priv);
4e80519e
ID
5919}
5920
dc97997a 5921static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
38807746 5922{
e2f80391 5923 struct intel_engine_cs *engine;
2b6b3a09 5924 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
38807746
D
5925
5926 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5927
297b32ec
VS
5928 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
5929 GT_FIFO_FREE_ENTRIES_CHV);
38807746
D
5930 if (gtfifodbg) {
5931 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5932 gtfifodbg);
5933 I915_WRITE(GTFIFODBG, gtfifodbg);
5934 }
5935
5936 cherryview_check_pctx(dev_priv);
5937
5938 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5939 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
59bad947 5940 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
38807746 5941
160614a2
VS
5942 /* Disable RC states. */
5943 I915_WRITE(GEN6_RC_CONTROL, 0);
5944
38807746
D
5945 /* 2a: Program RC6 thresholds.*/
5946 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5947 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5948 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5949
b4ac5afc 5950 for_each_engine(engine, dev_priv)
e2f80391 5951 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
38807746
D
5952 I915_WRITE(GEN6_RC_SLEEP, 0);
5953
f4f71c7d
D
5954 /* TO threshold set to 500 us (0x186 * 1.28 us) */
5955 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
38807746
D
5956
5957 /* allows RC6 residency counter to work */
5958 I915_WRITE(VLV_COUNTER_CONTROL,
5959 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5960 VLV_MEDIA_RC6_COUNT_EN |
5961 VLV_RENDER_RC6_COUNT_EN));
5962
5963 /* For now we assume BIOS is allocating and populating the PCBR */
5964 pcbr = I915_READ(VLV_PCBR);
5965
38807746 5966 /* 3: Enable RC6 */
dc97997a
CW
5967 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5968 (pcbr >> VLV_PCBR_ADDR_SHIFT))
af5a75a3 5969 rc6_mode = GEN7_RC_CTL_TO_MODE;
38807746
D
5970
5971 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5972
2b6b3a09 5973 /* 4 Program defaults and thresholds for RPS*/
3cbdb48f 5974 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2b6b3a09
D
5975 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5976 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5977 I915_WRITE(GEN6_RP_UP_EI, 66000);
5978 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5979
5980 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5981
5982 /* 5: Enable RPS */
5983 I915_WRITE(GEN6_RP_CONTROL,
5984 GEN6_RP_MEDIA_HW_NORMAL_MODE |
eb973a5e 5985 GEN6_RP_MEDIA_IS_GFX |
2b6b3a09
D
5986 GEN6_RP_ENABLE |
5987 GEN6_RP_UP_BUSY_AVG |
5988 GEN6_RP_DOWN_IDLE_AVG);
5989
3ef62342
D
5990 /* Setting Fixed Bias */
5991 val = VLV_OVERRIDE_EN |
5992 VLV_SOC_TDP_EN |
5993 CHV_BIAS_CPU_50_SOC_50;
5994 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5995
2b6b3a09
D
5996 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5997
8d40c3ae
VS
5998 /* RPS code assumes GPLL is used */
5999 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6000
742f491d 6001 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
2b6b3a09
D
6002 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6003
3a45b05c 6004 reset_rps(dev_priv, valleyview_set_rps);
2b6b3a09 6005
59bad947 6006 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
38807746
D
6007}
6008
dc97997a 6009static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
0a073b84 6010{
e2f80391 6011 struct intel_engine_cs *engine;
2a5913a8 6012 u32 gtfifodbg, val, rc6_mode = 0;
0a073b84
JB
6013
6014 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6015
ae48434c
ID
6016 valleyview_check_pctx(dev_priv);
6017
297b32ec
VS
6018 gtfifodbg = I915_READ(GTFIFODBG);
6019 if (gtfifodbg) {
f7d85c1e
JB
6020 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
6021 gtfifodbg);
0a073b84
JB
6022 I915_WRITE(GTFIFODBG, gtfifodbg);
6023 }
6024
c8d9a590 6025 /* If VLV, Forcewake all wells, else re-direct to regular path */
59bad947 6026 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
0a073b84 6027
160614a2
VS
6028 /* Disable RC states. */
6029 I915_WRITE(GEN6_RC_CONTROL, 0);
6030
cad725fe 6031 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
0a073b84
JB
6032 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6033 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6034 I915_WRITE(GEN6_RP_UP_EI, 66000);
6035 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6036
6037 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6038
6039 I915_WRITE(GEN6_RP_CONTROL,
6040 GEN6_RP_MEDIA_TURBO |
6041 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6042 GEN6_RP_MEDIA_IS_GFX |
6043 GEN6_RP_ENABLE |
6044 GEN6_RP_UP_BUSY_AVG |
6045 GEN6_RP_DOWN_IDLE_CONT);
6046
6047 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
6048 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6049 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6050
b4ac5afc 6051 for_each_engine(engine, dev_priv)
e2f80391 6052 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
0a073b84 6053
2f0aa304 6054 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
0a073b84
JB
6055
6056 /* allows RC6 residency counter to work */
49798eb2 6057 I915_WRITE(VLV_COUNTER_CONTROL,
31685c25
D
6058 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
6059 VLV_RENDER_RC0_COUNT_EN |
49798eb2
JB
6060 VLV_MEDIA_RC6_COUNT_EN |
6061 VLV_RENDER_RC6_COUNT_EN));
31685c25 6062
dc97997a 6063 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6b88f295 6064 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
dc39fff7 6065
dc97997a 6066 intel_print_rc6_info(dev_priv, rc6_mode);
dc39fff7 6067
a2b23fe0 6068 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
0a073b84 6069
3ef62342
D
6070 /* Setting Fixed Bias */
6071 val = VLV_OVERRIDE_EN |
6072 VLV_SOC_TDP_EN |
6073 VLV_BIAS_CPU_125_SOC_875;
6074 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6075
64936258 6076 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
0a073b84 6077
8d40c3ae
VS
6078 /* RPS code assumes GPLL is used */
6079 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6080
742f491d 6081 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
0a073b84
JB
6082 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6083
3a45b05c 6084 reset_rps(dev_priv, valleyview_set_rps);
0a073b84 6085
59bad947 6086 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
0a073b84
JB
6087}
6088
dde18883
ED
6089static unsigned long intel_pxfreq(u32 vidfreq)
6090{
6091 unsigned long freq;
6092 int div = (vidfreq & 0x3f0000) >> 16;
6093 int post = (vidfreq & 0x3000) >> 12;
6094 int pre = (vidfreq & 0x7);
6095
6096 if (!pre)
6097 return 0;
6098
6099 freq = ((div * 133333) / ((1<<post) * pre));
6100
6101 return freq;
6102}
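/*
 * Worked example (added): vidfreq fields of div = 16, post = 1 and
 * pre = 1 give 16 * 133333 / ((1 << 1) * 1) = 1066664, i.e. roughly a
 * 1.066 GHz P-state, assuming the 133333 base constant is in kHz.
 */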
6103
eb48eb00
DV
6104static const struct cparams {
6105 u16 i;
6106 u16 t;
6107 u16 m;
6108 u16 c;
6109} cparams[] = {
6110 { 1, 1333, 301, 28664 },
6111 { 1, 1066, 294, 24460 },
6112 { 1, 800, 294, 25192 },
6113 { 0, 1333, 276, 27605 },
6114 { 0, 1066, 276, 27605 },
6115 { 0, 800, 231, 23784 },
6116};
6117
f531dcb2 6118static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
6119{
6120 u64 total_count, diff, ret;
6121 u32 count1, count2, count3, m = 0, c = 0;
6122 unsigned long now = jiffies_to_msecs(jiffies), diff1;
6123 int i;
6124
02d71956
DV
6125 assert_spin_locked(&mchdev_lock);
6126
20e4d407 6127 diff1 = now - dev_priv->ips.last_time1;
eb48eb00
DV
6128
6129 /* Prevent division-by-zero if we are asking too fast.
6130 * Also, we don't get interesting results if we are polling
6131 * faster than once in 10ms, so just return the saved value
6132 * in such cases.
6133 */
6134 if (diff1 <= 10)
20e4d407 6135 return dev_priv->ips.chipset_power;
eb48eb00
DV
6136
6137 count1 = I915_READ(DMIEC);
6138 count2 = I915_READ(DDREC);
6139 count3 = I915_READ(CSIEC);
6140
6141 total_count = count1 + count2 + count3;
6142
6143 /* FIXME: handle per-counter overflow */
20e4d407
DV
6144 if (total_count < dev_priv->ips.last_count1) {
6145 diff = ~0UL - dev_priv->ips.last_count1;
eb48eb00
DV
6146 diff += total_count;
6147 } else {
20e4d407 6148 diff = total_count - dev_priv->ips.last_count1;
eb48eb00
DV
6149 }
6150
6151 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
20e4d407
DV
6152 if (cparams[i].i == dev_priv->ips.c_m &&
6153 cparams[i].t == dev_priv->ips.r_t) {
eb48eb00
DV
6154 m = cparams[i].m;
6155 c = cparams[i].c;
6156 break;
6157 }
6158 }
6159
6160 diff = div_u64(diff, diff1);
6161 ret = ((m * diff) + c);
6162 ret = div_u64(ret, 10);
6163
20e4d407
DV
6164 dev_priv->ips.last_count1 = total_count;
6165 dev_priv->ips.last_time1 = now;
eb48eb00 6166
20e4d407 6167 dev_priv->ips.chipset_power = ret;
eb48eb00
DV
6168
6169 return ret;
6170}
6171
f531dcb2
CW
6172unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6173{
6174 unsigned long val;
6175
dc97997a 6176 if (INTEL_INFO(dev_priv)->gen != 5)
f531dcb2
CW
6177 return 0;
6178
6179 spin_lock_irq(&mchdev_lock);
6180
6181 val = __i915_chipset_val(dev_priv);
6182
6183 spin_unlock_irq(&mchdev_lock);
6184
6185 return val;
6186}
6187
eb48eb00
DV
6188unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6189{
6190 unsigned long m, x, b;
6191 u32 tsfs;
6192
6193 tsfs = I915_READ(TSFS);
6194
6195 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6196 x = I915_READ8(TR1);
6197
6198 b = tsfs & TSFS_INTR_MASK;
6199
6200 return ((m * x) / 127) - b;
6201}
6202
d972d6ee
MK
6203static int _pxvid_to_vd(u8 pxvid)
6204{
6205 if (pxvid == 0)
6206 return 0;
6207
6208 if (pxvid >= 8 && pxvid < 31)
6209 pxvid = 31;
6210
6211 return (pxvid + 2) * 125;
6212}
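/*
 * Worked example (added): pxvid = 0x10 falls inside [8, 31) and is
 * clamped to 31, so vd = (31 + 2) * 125 = 4125; pvid_to_extvid() on a
 * mobile part then reports vm = 4125 - 1125 = 3000, while non-mobile
 * parts return vd unchanged.
 */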
6213
6214static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
eb48eb00 6215{
d972d6ee
MK
6216 const int vd = _pxvid_to_vd(pxvid);
6217 const int vm = vd - 1125;
6218
dc97997a 6219 if (INTEL_INFO(dev_priv)->is_mobile)
d972d6ee
MK
6220 return vm > 0 ? vm : 0;
6221
6222 return vd;
eb48eb00
DV
6223}
6224
02d71956 6225static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00 6226{
5ed0bdf2 6227 u64 now, diff, diffms;
eb48eb00
DV
6228 u32 count;
6229
02d71956 6230 assert_spin_locked(&mchdev_lock);
eb48eb00 6231
5ed0bdf2
TG
6232 now = ktime_get_raw_ns();
6233 diffms = now - dev_priv->ips.last_time2;
6234 do_div(diffms, NSEC_PER_MSEC);
eb48eb00
DV
6235
6236 /* Don't divide by 0 */
eb48eb00
DV
6237 if (!diffms)
6238 return;
6239
6240 count = I915_READ(GFXEC);
6241
20e4d407
DV
6242 if (count < dev_priv->ips.last_count2) {
6243 diff = ~0UL - dev_priv->ips.last_count2;
eb48eb00
DV
6244 diff += count;
6245 } else {
20e4d407 6246 diff = count - dev_priv->ips.last_count2;
eb48eb00
DV
6247 }
6248
20e4d407
DV
6249 dev_priv->ips.last_count2 = count;
6250 dev_priv->ips.last_time2 = now;
eb48eb00
DV
6251
6252 /* More magic constants... */
6253 diff = diff * 1181;
6254 diff = div_u64(diff, diffms * 10);
20e4d407 6255 dev_priv->ips.gfx_power = diff;
eb48eb00
DV
6256}
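/*
 * Worked example (added): 5000 new GFXEC counts over 100ms give
 * diff = 5000 * 1181 / (100 * 10) = 5905, the value cached in
 * ips.gfx_power (in the same magic units as the 1181 constant).
 */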
6257
02d71956
DV
6258void i915_update_gfx_val(struct drm_i915_private *dev_priv)
6259{
dc97997a 6260 if (INTEL_INFO(dev_priv)->gen != 5)
02d71956
DV
6261 return;
6262
9270388e 6263 spin_lock_irq(&mchdev_lock);
02d71956
DV
6264
6265 __i915_update_gfx_val(dev_priv);
6266
9270388e 6267 spin_unlock_irq(&mchdev_lock);
02d71956
DV
6268}
6269
f531dcb2 6270static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
6271{
6272 unsigned long t, corr, state1, corr2, state2;
6273 u32 pxvid, ext_v;
6274
02d71956
DV
6275 assert_spin_locked(&mchdev_lock);
6276
616847e7 6277 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
eb48eb00
DV
6278 pxvid = (pxvid >> 24) & 0x7f;
6279 ext_v = pvid_to_extvid(dev_priv, pxvid);
6280
6281 state1 = ext_v;
6282
6283 t = i915_mch_val(dev_priv);
6284
6285 /* Revel in the empirically derived constants */
6286
6287 /* Correction factor in 1/100000 units */
6288 if (t > 80)
6289 corr = ((t * 2349) + 135940);
6290 else if (t >= 50)
6291 corr = ((t * 964) + 29317);
6292 else /* < 50 */
6293 corr = ((t * 301) + 1004);
6294
6295 corr = corr * ((150142 * state1) / 10000 - 78642);
6296 corr /= 100000;
20e4d407 6297 corr2 = (corr * dev_priv->ips.corr);
eb48eb00
DV
6298
6299 state2 = (corr2 * state1) / 10000;
6300 state2 /= 100; /* convert to mW */
6301
02d71956 6302 __i915_update_gfx_val(dev_priv);
eb48eb00 6303
20e4d407 6304 return dev_priv->ips.gfx_power + state2;
eb48eb00
DV
6305}
6306
f531dcb2
CW
6307unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
6308{
6309 unsigned long val;
6310
dc97997a 6311 if (INTEL_INFO(dev_priv)->gen != 5)
f531dcb2
CW
6312 return 0;
6313
6314 spin_lock_irq(&mchdev_lock);
6315
6316 val = __i915_gfx_val(dev_priv);
6317
6318 spin_unlock_irq(&mchdev_lock);
6319
6320 return val;
6321}
6322
eb48eb00
DV
6323/**
6324 * i915_read_mch_val - return value for IPS use
6325 *
6326 * Calculate and return a value for the IPS driver to use when deciding whether
6327 * we have thermal and power headroom to increase CPU or GPU power budget.
6328 */
6329unsigned long i915_read_mch_val(void)
6330{
6331 struct drm_i915_private *dev_priv;
6332 unsigned long chipset_val, graphics_val, ret = 0;
6333
9270388e 6334 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6335 if (!i915_mch_dev)
6336 goto out_unlock;
6337 dev_priv = i915_mch_dev;
6338
f531dcb2
CW
6339 chipset_val = __i915_chipset_val(dev_priv);
6340 graphics_val = __i915_gfx_val(dev_priv);
eb48eb00
DV
6341
6342 ret = chipset_val + graphics_val;
6343
6344out_unlock:
9270388e 6345 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6346
6347 return ret;
6348}
6349EXPORT_SYMBOL_GPL(i915_read_mch_val);
6350
6351/**
6352 * i915_gpu_raise - raise GPU frequency limit
6353 *
6354 * Raise the limit; IPS indicates we have thermal headroom.
6355 */
6356bool i915_gpu_raise(void)
6357{
6358 struct drm_i915_private *dev_priv;
6359 bool ret = true;
6360
9270388e 6361 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6362 if (!i915_mch_dev) {
6363 ret = false;
6364 goto out_unlock;
6365 }
6366 dev_priv = i915_mch_dev;
6367
20e4d407
DV
6368 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
6369 dev_priv->ips.max_delay--;
eb48eb00
DV
6370
6371out_unlock:
9270388e 6372 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6373
6374 return ret;
6375}
6376EXPORT_SYMBOL_GPL(i915_gpu_raise);
6377
6378/**
6379 * i915_gpu_lower - lower GPU frequency limit
6380 *
6381 * IPS indicates we're close to a thermal limit, so throttle back the GPU
6382 * frequency maximum.
6383 */
6384bool i915_gpu_lower(void)
6385{
6386 struct drm_i915_private *dev_priv;
6387 bool ret = true;
6388
9270388e 6389 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6390 if (!i915_mch_dev) {
6391 ret = false;
6392 goto out_unlock;
6393 }
6394 dev_priv = i915_mch_dev;
6395
20e4d407
DV
6396 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6397 dev_priv->ips.max_delay++;
eb48eb00
DV
6398
6399out_unlock:
9270388e 6400 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6401
6402 return ret;
6403}
6404EXPORT_SYMBOL_GPL(i915_gpu_lower);
6405
6406/**
6407 * i915_gpu_busy - indicate GPU business to IPS
6408 *
6409 * Tell the IPS driver whether or not the GPU is busy.
6410 */
6411bool i915_gpu_busy(void)
6412{
eb48eb00
DV
6413 bool ret = false;
6414
9270388e 6415 spin_lock_irq(&mchdev_lock);
dcff85c8
CW
6416 if (i915_mch_dev)
6417 ret = i915_mch_dev->gt.awake;
9270388e 6418 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6419
6420 return ret;
6421}
6422EXPORT_SYMBOL_GPL(i915_gpu_busy);
6423
6424/**
6425 * i915_gpu_turbo_disable - disable graphics turbo
6426 *
6427 * Disable graphics turbo by resetting the max frequency and setting the
6428 * current frequency to the default.
6429 */
6430bool i915_gpu_turbo_disable(void)
6431{
6432 struct drm_i915_private *dev_priv;
6433 bool ret = true;
6434
9270388e 6435 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6436 if (!i915_mch_dev) {
6437 ret = false;
6438 goto out_unlock;
6439 }
6440 dev_priv = i915_mch_dev;
6441
20e4d407 6442 dev_priv->ips.max_delay = dev_priv->ips.fstart;
eb48eb00 6443
91d14251 6444 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
eb48eb00
DV
6445 ret = false;
6446
6447out_unlock:
9270388e 6448 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6449
6450 return ret;
6451}
6452EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6453
6454/**
6455 * Tells the intel_ips driver that the i915 driver is now loaded, if
6456 * IPS got loaded first.
6457 *
6458 * This awkward dance is so that neither module has to depend on the
6459 * other in order for IPS to do the appropriate communication of
6460 * GPU turbo limits to i915.
6461 */
6462static void
6463ips_ping_for_i915_load(void)
6464{
6465 void (*link)(void);
6466
6467 link = symbol_get(ips_link_to_i915_driver);
6468 if (link) {
6469 link();
6470 symbol_put(ips_link_to_i915_driver);
6471 }
6472}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable PMON while we program it */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
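	/*
	 * Worked example for the weight formula above (hypothetical register
	 * values, not data read from hardware): the divisor 127*127*900
	 * normalizes full scale, so a P-state with vid = 127 and
	 * freq/1000 = 900 yields exactly 127*127*900*255 / (127*127*900)
	 * = 255 (0xff), while e.g. vid = 90 at freq/1000 = 400 gives
	 * 90*90*400*255 / (127*127*900) = 56 after integer truncation.
	 */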
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;

	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dev_priv->rps.min_freq_softlimit =
			max_t(int,
			      dev_priv->rps.efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (dev_priv->rps.max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			dev_priv->rps.max_freq = params & 0xff;
		}
	}
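	/*
	 * Worked example for the decode above (hypothetical value, not read
	 * from hardware): params = 0x8000002e has bit 31 set, so overclocking
	 * is supported, and the low byte 0x2e (46) scales by the 50 MHz step
	 * to an overclock limit of 2300 MHz.
	 */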

	/* Finally allow us to boost to max by default */
	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_autoenable_gt_powersave(dev_priv);
}

void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}

void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.enabled = true; /* force disabling */
	intel_disable_gt_powersave(dev_priv);

	gen6_reset_rps_interrupts(dev_priv);
}

void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.enabled))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_disable_rc6(dev_priv);
		gen9_disable_rps(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_disable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_disable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_disable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	}

	dev_priv->rps.enabled = false;
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* We shouldn't be disabling as we submit, so this should be less
	 * racy than it appears!
	 */
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	if (READ_ONCE(dev_priv->rps.enabled))
		goto out;

	rcs = &dev_priv->engine[RCS];
	if (rcs->last_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request_no_flush(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out:
	intel_runtime_pm_put(dev_priv);
}

void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh:
	 * bits 22/21 of 0x42004
	 * bit 5 of 0x42020
	 * bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on documentation from the hardware team, the following
	 * bits should be set unconditionally in order to enable FBC:
	 * bit 22 of 0x42000
	 * bit 22 of 0x42004
	 * bits 7, 8, 9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes a weird display corruption (a few pixels shifted
	 * downward) seen only on the LVDS panels of some HP laptops with IVB.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * bit 21 and bit 22 of 0x42000
	 * bit 21 and bit 22 of 0x42004
	 * bit 5 and bit 7 of 0x42020
	 * bit 14 of 0x70180
	 * bit 14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}

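/*
 * Usage note for the helper above: the credit split is platform-tuned by
 * its callers, e.g. gen8_set_l3sqc_credits(dev_priv, 30, 2) for
 * WaProgramL3SqcReg1Default on Broadwell and
 * gen8_set_l3sqc_credits(dev_priv, 38, 2) on Cherryview; both calls
 * appear later in this file.
 */
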
static void kabylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev)) {
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	} else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

static void nop_init_clock_gating(struct drm_device *dev)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disabling CxSR\n");
		}
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else {
			dev_priv->display.update_wm = pineview_update_wm;
		}
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the _fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the _fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
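
/*
 * Round-trip sketch for the two helpers above: on non-VLV/CHV platforms
 * they are inverses up to rounding. Assuming GT_FREQUENCY_MULTIPLIER is
 * the 50 MHz step implied by the "* 50" overclock readout earlier in this
 * file, opcode 46 maps to intel_gpu_freq() == 2300 MHz, and
 * intel_freq_opcode(dev_priv, 2300) recovers 46 again.
 */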

struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}