drivers/gpu/drm/i915/intel_pm.c

1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using as little as 0V while in this stage.
 * The stage is entered automatically when the GPU is idle and RC6 support
 * is enabled; as soon as a new workload arrives, the GPU wakes up again.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require a higher latency to enter and to wake up from.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

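/*
 * Illustrative sketch (an addition for clarity, not part of the original
 * file): one way a caller could fold the per-state enable bits above into a
 * single mask. The helper name and its boolean parameters are assumptions
 * made purely for this example.
 */
static inline int example_rc6_enable_mask(bool rc6, bool rc6p, bool rc6pp)
{
	int mask = 0;

	if (rc6)
		mask |= INTEL_RC6_ENABLE;	/* normal RC6 */
	if (rc6p)
		mask |= INTEL_RC6p_ENABLE;	/* deep RC6 */
	if (rc6pp)
		mask |= INTEL_RC6pp_ENABLE;	/* deepest RC6 */

	return mask;
}
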
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);
}

static void skl_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	if (INTEL_REVID(dev) <= SKL_REVID_B0) {
		/*
		 * WaDisableSDEUnitClockGating:skl
		 * WaSetGAPSunitClckGateDisable:skl
		 */
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

		/* WaDisableVFUnitClockGating:skl */
		I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
			   GEN6_VFUNIT_CLOCK_GATE_DISABLE);
	}

	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);

		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (INTEL_REVID(dev) <= SKL_REVID_E0)
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	if (INTEL_REVID(dev) == BXT_REVID_A0) {
		/*
		 * Hardware specification requires this bit to be
		 * set to 1 for A0
		 */
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
	}

	/* WaSetClckGatingDisableMedia:bxt */
	if (INTEL_REVID(dev) == BXT_REVID_A0) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}
}

c921aba8
DV
145static void i915_pineview_get_mem_freq(struct drm_device *dev)
146{
50227e1c 147 struct drm_i915_private *dev_priv = dev->dev_private;
c921aba8
DV
148 u32 tmp;
149
150 tmp = I915_READ(CLKCFG);
151
152 switch (tmp & CLKCFG_FSB_MASK) {
153 case CLKCFG_FSB_533:
154 dev_priv->fsb_freq = 533; /* 133*4 */
155 break;
156 case CLKCFG_FSB_800:
157 dev_priv->fsb_freq = 800; /* 200*4 */
158 break;
159 case CLKCFG_FSB_667:
160 dev_priv->fsb_freq = 667; /* 167*4 */
161 break;
162 case CLKCFG_FSB_400:
163 dev_priv->fsb_freq = 400; /* 100*4 */
164 break;
165 }
166
167 switch (tmp & CLKCFG_MEM_MASK) {
168 case CLKCFG_MEM_533:
169 dev_priv->mem_freq = 533;
170 break;
171 case CLKCFG_MEM_667:
172 dev_priv->mem_freq = 667;
173 break;
174 case CLKCFG_MEM_800:
175 dev_priv->mem_freq = 800;
176 break;
177 }
178
179 /* detect pineview DDR3 setting */
180 tmp = I915_READ(CSHRDDR3CTL);
181 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
182}
183
184static void i915_ironlake_get_mem_freq(struct drm_device *dev)
185{
50227e1c 186 struct drm_i915_private *dev_priv = dev->dev_private;
c921aba8
DV
187 u16 ddrpll, csipll;
188
189 ddrpll = I915_READ16(DDRMPLL1);
190 csipll = I915_READ16(CSIPLL0);
191
192 switch (ddrpll & 0xff) {
193 case 0xc:
194 dev_priv->mem_freq = 800;
195 break;
196 case 0x10:
197 dev_priv->mem_freq = 1066;
198 break;
199 case 0x14:
200 dev_priv->mem_freq = 1333;
201 break;
202 case 0x18:
203 dev_priv->mem_freq = 1600;
204 break;
205 default:
206 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
207 ddrpll & 0xff);
208 dev_priv->mem_freq = 0;
209 break;
210 }
211
20e4d407 212 dev_priv->ips.r_t = dev_priv->mem_freq;
c921aba8
DV
213
214 switch (csipll & 0x3ff) {
215 case 0x00c:
216 dev_priv->fsb_freq = 3200;
217 break;
218 case 0x00e:
219 dev_priv->fsb_freq = 3733;
220 break;
221 case 0x010:
222 dev_priv->fsb_freq = 4266;
223 break;
224 case 0x012:
225 dev_priv->fsb_freq = 4800;
226 break;
227 case 0x014:
228 dev_priv->fsb_freq = 5333;
229 break;
230 case 0x016:
231 dev_priv->fsb_freq = 5866;
232 break;
233 case 0x018:
234 dev_priv->fsb_freq = 6400;
235 break;
236 default:
237 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
238 csipll & 0x3ff);
239 dev_priv->fsb_freq = 0;
240 break;
241 }
242
243 if (dev_priv->fsb_freq == 3200) {
20e4d407 244 dev_priv->ips.c_m = 0;
c921aba8 245 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
20e4d407 246 dev_priv->ips.c_m = 1;
c921aba8 247 } else {
20e4d407 248 dev_priv->ips.c_m = 2;
c921aba8
DV
249 }
250}
251
b445e3b0
ED
252static const struct cxsr_latency cxsr_latency_table[] = {
253 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
254 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
255 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
256 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
257 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
258
259 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
260 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
261 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
262 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
263 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
264
265 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
266 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
267 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
268 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
269 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
270
271 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
272 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
273 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
274 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
275 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
276
277 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
278 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
279 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
280 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
281 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
282
283 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
284 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
285 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
286 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
287 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
288};
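/*
 * Note added for clarity: each row above positionally initializes a struct
 * cxsr_latency entry. Based on how the table is consumed below, the columns
 * appear to be: is_desktop, is_ddr3, fsb_freq, mem_freq, followed by the
 * four latencies read in pineview_update_wm() (display_sr, cursor_sr,
 * display_hpll_disable, cursor_hpll_disable).
 */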
289
63c62275 290static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
b445e3b0
ED
291 int is_ddr3,
292 int fsb,
293 int mem)
294{
295 const struct cxsr_latency *latency;
296 int i;
297
298 if (fsb == 0 || mem == 0)
299 return NULL;
300
301 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
302 latency = &cxsr_latency_table[i];
303 if (is_desktop == latency->is_desktop &&
304 is_ddr3 == latency->is_ddr3 &&
305 fsb == latency->fsb_freq && mem == latency->mem_freq)
306 return latency;
307 }
308
309 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
310
311 return NULL;
312}
313
fc1ac8de
VS
314static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
315{
316 u32 val;
317
318 mutex_lock(&dev_priv->rps.hw_lock);
319
320 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
321 if (enable)
322 val &= ~FORCE_DDR_HIGH_FREQ;
323 else
324 val |= FORCE_DDR_HIGH_FREQ;
325 val &= ~FORCE_DDR_LOW_FREQ;
326 val |= FORCE_DDR_FREQ_REQ_ACK;
327 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
328
329 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
330 FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
331 DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
332
333 mutex_unlock(&dev_priv->rps.hw_lock);
334}
335
cfb41411
VS
336static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
337{
338 u32 val;
339
340 mutex_lock(&dev_priv->rps.hw_lock);
341
342 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
343 if (enable)
344 val |= DSP_MAXFIFO_PM5_ENABLE;
345 else
346 val &= ~DSP_MAXFIFO_PM5_ENABLE;
347 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
348
349 mutex_unlock(&dev_priv->rps.hw_lock);
350}
351
f4998963
VS
352#define FW_WM(value, plane) \
353 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
354
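/*
 * Added illustration: FW_WM(value, plane) places a watermark value into the
 * named field of a DSPFW register. For example, FW_WM(wm, SR) expands to
 * ((wm << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the value shifted into the
 * self-refresh field and clamped to that field's mask.
 */
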
5209b1f4 355void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
b445e3b0 356{
5209b1f4
ID
357 struct drm_device *dev = dev_priv->dev;
358 u32 val;
b445e3b0 359
5209b1f4
ID
360 if (IS_VALLEYVIEW(dev)) {
361 I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
a7a6c498 362 POSTING_READ(FW_BLC_SELF_VLV);
852eb00d 363 dev_priv->wm.vlv.cxsr = enable;
5209b1f4
ID
364 } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
365 I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
a7a6c498 366 POSTING_READ(FW_BLC_SELF);
5209b1f4
ID
367 } else if (IS_PINEVIEW(dev)) {
368 val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
369 val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
370 I915_WRITE(DSPFW3, val);
a7a6c498 371 POSTING_READ(DSPFW3);
5209b1f4
ID
372 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
373 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
374 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
375 I915_WRITE(FW_BLC_SELF, val);
a7a6c498 376 POSTING_READ(FW_BLC_SELF);
5209b1f4
ID
377 } else if (IS_I915GM(dev)) {
378 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
379 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
380 I915_WRITE(INSTPM, val);
a7a6c498 381 POSTING_READ(INSTPM);
5209b1f4
ID
382 } else {
383 return;
384 }
b445e3b0 385
5209b1f4
ID
386 DRM_DEBUG_KMS("memory self-refresh is %s\n",
387 enable ? "enabled" : "disabled");
b445e3b0
ED
388}
389
fc1ac8de 390
b445e3b0
ED
391/*
392 * Latency for FIFO fetches is dependent on several factors:
393 * - memory configuration (speed, channels)
394 * - chipset
395 * - current MCH state
396 * It can be fairly high in some situations, so here we assume a fairly
397 * pessimal value. It's a tradeoff between extra memory fetches (if we
398 * set this value too high, the FIFO will fetch frequently to stay full)
399 * and power consumption (set it too low to save power and we might see
400 * FIFO underruns and display "flicker").
401 *
402 * A value of 5us seems to be a good balance; safe for very low end
403 * platforms but not overly aggressive on lower latency configs.
404 */
5aef6003 405static const int pessimal_latency_ns = 5000;
b445e3b0 406
b5004720
VS
407#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
408 ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
409
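/*
 * Added illustration: VLV_FIFO_START() reassembles a 9-bit FIFO start offset
 * whose low 8 bits live in one DSPARB register and whose 9th bit lives in
 * DSPARB2. For example, VLV_FIFO_START(dsparb, dsparb2, 0, 0) takes bits 7:0
 * of DSPARB and bit 0 of DSPARB2 (used as bit 8 of the result).
 */
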
410static int vlv_get_fifo_size(struct drm_device *dev,
411 enum pipe pipe, int plane)
412{
413 struct drm_i915_private *dev_priv = dev->dev_private;
414 int sprite0_start, sprite1_start, size;
415
416 switch (pipe) {
417 uint32_t dsparb, dsparb2, dsparb3;
418 case PIPE_A:
419 dsparb = I915_READ(DSPARB);
420 dsparb2 = I915_READ(DSPARB2);
421 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
422 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
423 break;
424 case PIPE_B:
425 dsparb = I915_READ(DSPARB);
426 dsparb2 = I915_READ(DSPARB2);
427 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
428 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
429 break;
430 case PIPE_C:
431 dsparb2 = I915_READ(DSPARB2);
432 dsparb3 = I915_READ(DSPARB3);
433 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
434 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
435 break;
436 default:
437 return 0;
438 }
439
440 switch (plane) {
441 case 0:
442 size = sprite0_start;
443 break;
444 case 1:
445 size = sprite1_start - sprite0_start;
446 break;
447 case 2:
448 size = 512 - 1 - sprite1_start;
449 break;
450 default:
451 return 0;
452 }
453
454 DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
455 pipe_name(pipe), plane == 0 ? "primary" : "sprite",
456 plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
457 size);
458
459 return size;
460}
461
1fa61106 462static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
b445e3b0
ED
463{
464 struct drm_i915_private *dev_priv = dev->dev_private;
465 uint32_t dsparb = I915_READ(DSPARB);
466 int size;
467
468 size = dsparb & 0x7f;
469 if (plane)
470 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
471
472 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
473 plane ? "B" : "A", size);
474
475 return size;
476}
477
feb56b93 478static int i830_get_fifo_size(struct drm_device *dev, int plane)
b445e3b0
ED
479{
480 struct drm_i915_private *dev_priv = dev->dev_private;
481 uint32_t dsparb = I915_READ(DSPARB);
482 int size;
483
484 size = dsparb & 0x1ff;
485 if (plane)
486 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
487 size >>= 1; /* Convert to cachelines */
488
489 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
490 plane ? "B" : "A", size);
491
492 return size;
493}
494
1fa61106 495static int i845_get_fifo_size(struct drm_device *dev, int plane)
b445e3b0
ED
496{
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 uint32_t dsparb = I915_READ(DSPARB);
499 int size;
500
501 size = dsparb & 0x7f;
502 size >>= 2; /* Convert to cachelines */
503
504 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
505 plane ? "B" : "A",
506 size);
507
508 return size;
509}
510
b445e3b0
ED
511/* Pineview has different values for various configs */
512static const struct intel_watermark_params pineview_display_wm = {
e0f0273e
VS
513 .fifo_size = PINEVIEW_DISPLAY_FIFO,
514 .max_wm = PINEVIEW_MAX_WM,
515 .default_wm = PINEVIEW_DFT_WM,
516 .guard_size = PINEVIEW_GUARD_WM,
517 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
b445e3b0
ED
518};
519static const struct intel_watermark_params pineview_display_hplloff_wm = {
e0f0273e
VS
520 .fifo_size = PINEVIEW_DISPLAY_FIFO,
521 .max_wm = PINEVIEW_MAX_WM,
522 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
523 .guard_size = PINEVIEW_GUARD_WM,
524 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
b445e3b0
ED
525};
526static const struct intel_watermark_params pineview_cursor_wm = {
e0f0273e
VS
527 .fifo_size = PINEVIEW_CURSOR_FIFO,
528 .max_wm = PINEVIEW_CURSOR_MAX_WM,
529 .default_wm = PINEVIEW_CURSOR_DFT_WM,
530 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
531 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
b445e3b0
ED
532};
533static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
e0f0273e
VS
534 .fifo_size = PINEVIEW_CURSOR_FIFO,
535 .max_wm = PINEVIEW_CURSOR_MAX_WM,
536 .default_wm = PINEVIEW_CURSOR_DFT_WM,
537 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
538 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
b445e3b0
ED
539};
540static const struct intel_watermark_params g4x_wm_info = {
e0f0273e
VS
541 .fifo_size = G4X_FIFO_SIZE,
542 .max_wm = G4X_MAX_WM,
543 .default_wm = G4X_MAX_WM,
544 .guard_size = 2,
545 .cacheline_size = G4X_FIFO_LINE_SIZE,
b445e3b0
ED
546};
547static const struct intel_watermark_params g4x_cursor_wm_info = {
e0f0273e
VS
548 .fifo_size = I965_CURSOR_FIFO,
549 .max_wm = I965_CURSOR_MAX_WM,
550 .default_wm = I965_CURSOR_DFT_WM,
551 .guard_size = 2,
552 .cacheline_size = G4X_FIFO_LINE_SIZE,
b445e3b0
ED
553};
554static const struct intel_watermark_params valleyview_wm_info = {
e0f0273e
VS
555 .fifo_size = VALLEYVIEW_FIFO_SIZE,
556 .max_wm = VALLEYVIEW_MAX_WM,
557 .default_wm = VALLEYVIEW_MAX_WM,
558 .guard_size = 2,
559 .cacheline_size = G4X_FIFO_LINE_SIZE,
b445e3b0
ED
560};
561static const struct intel_watermark_params valleyview_cursor_wm_info = {
e0f0273e
VS
562 .fifo_size = I965_CURSOR_FIFO,
563 .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
564 .default_wm = I965_CURSOR_DFT_WM,
565 .guard_size = 2,
566 .cacheline_size = G4X_FIFO_LINE_SIZE,
b445e3b0
ED
567};
568static const struct intel_watermark_params i965_cursor_wm_info = {
e0f0273e
VS
569 .fifo_size = I965_CURSOR_FIFO,
570 .max_wm = I965_CURSOR_MAX_WM,
571 .default_wm = I965_CURSOR_DFT_WM,
572 .guard_size = 2,
573 .cacheline_size = I915_FIFO_LINE_SIZE,
b445e3b0
ED
574};
575static const struct intel_watermark_params i945_wm_info = {
e0f0273e
VS
576 .fifo_size = I945_FIFO_SIZE,
577 .max_wm = I915_MAX_WM,
578 .default_wm = 1,
579 .guard_size = 2,
580 .cacheline_size = I915_FIFO_LINE_SIZE,
b445e3b0
ED
581};
582static const struct intel_watermark_params i915_wm_info = {
e0f0273e
VS
583 .fifo_size = I915_FIFO_SIZE,
584 .max_wm = I915_MAX_WM,
585 .default_wm = 1,
586 .guard_size = 2,
587 .cacheline_size = I915_FIFO_LINE_SIZE,
b445e3b0 588};
9d539105 589static const struct intel_watermark_params i830_a_wm_info = {
e0f0273e
VS
590 .fifo_size = I855GM_FIFO_SIZE,
591 .max_wm = I915_MAX_WM,
592 .default_wm = 1,
593 .guard_size = 2,
594 .cacheline_size = I830_FIFO_LINE_SIZE,
b445e3b0 595};
9d539105
VS
596static const struct intel_watermark_params i830_bc_wm_info = {
597 .fifo_size = I855GM_FIFO_SIZE,
598 .max_wm = I915_MAX_WM/2,
599 .default_wm = 1,
600 .guard_size = 2,
601 .cacheline_size = I830_FIFO_LINE_SIZE,
602};
feb56b93 603static const struct intel_watermark_params i845_wm_info = {
e0f0273e
VS
604 .fifo_size = I830_FIFO_SIZE,
605 .max_wm = I915_MAX_WM,
606 .default_wm = 1,
607 .guard_size = 2,
608 .cacheline_size = I830_FIFO_LINE_SIZE,
b445e3b0
ED
609};
610
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
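/*
 * Worked example added for clarity (the numbers are illustrative only): with
 * a 100000 kHz pixel clock, 4 bytes per pixel, 5000 ns latency and a 64-byte
 * cacheline, entries_required = ((100000 / 1000) * 4 * 5000) / 1000 = 2000
 * bytes = DIV_ROUND_UP(2000, 64) = 32 cachelines. With a hypothetical FIFO
 * of 96 entries and a guard size of 2, the watermark level would be
 * 96 - (32 + 2) = 62.
 */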
671
672static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
673{
674 struct drm_crtc *crtc, *enabled = NULL;
675
70e1e0ec 676 for_each_crtc(dev, crtc) {
3490ea5d 677 if (intel_crtc_active(crtc)) {
b445e3b0
ED
678 if (enabled)
679 return NULL;
680 enabled = crtc;
681 }
682 }
683
684 return enabled;
685}
686
46ba614c 687static void pineview_update_wm(struct drm_crtc *unused_crtc)
b445e3b0 688{
46ba614c 689 struct drm_device *dev = unused_crtc->dev;
b445e3b0
ED
690 struct drm_i915_private *dev_priv = dev->dev_private;
691 struct drm_crtc *crtc;
692 const struct cxsr_latency *latency;
693 u32 reg;
694 unsigned long wm;
695
696 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
697 dev_priv->fsb_freq, dev_priv->mem_freq);
698 if (!latency) {
699 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
5209b1f4 700 intel_set_memory_cxsr(dev_priv, false);
b445e3b0
ED
701 return;
702 }
703
704 crtc = single_enabled_crtc(dev);
705 if (crtc) {
241bfc38 706 const struct drm_display_mode *adjusted_mode;
59bea882 707 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
241bfc38
DL
708 int clock;
709
6e3c9717 710 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 711 clock = adjusted_mode->crtc_clock;
b445e3b0
ED
712
713 /* Display SR */
714 wm = intel_calculate_wm(clock, &pineview_display_wm,
715 pineview_display_wm.fifo_size,
716 pixel_size, latency->display_sr);
717 reg = I915_READ(DSPFW1);
718 reg &= ~DSPFW_SR_MASK;
f4998963 719 reg |= FW_WM(wm, SR);
b445e3b0
ED
720 I915_WRITE(DSPFW1, reg);
721 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
722
723 /* cursor SR */
724 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
725 pineview_display_wm.fifo_size,
726 pixel_size, latency->cursor_sr);
727 reg = I915_READ(DSPFW3);
728 reg &= ~DSPFW_CURSOR_SR_MASK;
f4998963 729 reg |= FW_WM(wm, CURSOR_SR);
b445e3b0
ED
730 I915_WRITE(DSPFW3, reg);
731
732 /* Display HPLL off SR */
733 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
734 pineview_display_hplloff_wm.fifo_size,
735 pixel_size, latency->display_hpll_disable);
736 reg = I915_READ(DSPFW3);
737 reg &= ~DSPFW_HPLL_SR_MASK;
f4998963 738 reg |= FW_WM(wm, HPLL_SR);
b445e3b0
ED
739 I915_WRITE(DSPFW3, reg);
740
741 /* cursor HPLL off SR */
742 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
743 pineview_display_hplloff_wm.fifo_size,
744 pixel_size, latency->cursor_hpll_disable);
745 reg = I915_READ(DSPFW3);
746 reg &= ~DSPFW_HPLL_CURSOR_MASK;
f4998963 747 reg |= FW_WM(wm, HPLL_CURSOR);
b445e3b0
ED
748 I915_WRITE(DSPFW3, reg);
749 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
750
5209b1f4 751 intel_set_memory_cxsr(dev_priv, true);
b445e3b0 752 } else {
5209b1f4 753 intel_set_memory_cxsr(dev_priv, false);
b445e3b0
ED
754 }
755}
756
757static bool g4x_compute_wm0(struct drm_device *dev,
758 int plane,
759 const struct intel_watermark_params *display,
760 int display_latency_ns,
761 const struct intel_watermark_params *cursor,
762 int cursor_latency_ns,
763 int *plane_wm,
764 int *cursor_wm)
765{
766 struct drm_crtc *crtc;
4fe8590a 767 const struct drm_display_mode *adjusted_mode;
b445e3b0
ED
768 int htotal, hdisplay, clock, pixel_size;
769 int line_time_us, line_count;
770 int entries, tlb_miss;
771
772 crtc = intel_get_crtc_for_plane(dev, plane);
3490ea5d 773 if (!intel_crtc_active(crtc)) {
b445e3b0
ED
774 *cursor_wm = cursor->guard_size;
775 *plane_wm = display->guard_size;
776 return false;
777 }
778
6e3c9717 779 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 780 clock = adjusted_mode->crtc_clock;
fec8cba3 781 htotal = adjusted_mode->crtc_htotal;
6e3c9717 782 hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
59bea882 783 pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
b445e3b0
ED
784
785 /* Use the small buffer method to calculate plane watermark */
786 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
787 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
788 if (tlb_miss > 0)
789 entries += tlb_miss;
790 entries = DIV_ROUND_UP(entries, display->cacheline_size);
791 *plane_wm = entries + display->guard_size;
792 if (*plane_wm > (int)display->max_wm)
793 *plane_wm = display->max_wm;
794
795 /* Use the large buffer method to calculate cursor watermark */
922044c9 796 line_time_us = max(htotal * 1000 / clock, 1);
b445e3b0 797 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
3dd512fb 798 entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
b445e3b0
ED
799 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
800 if (tlb_miss > 0)
801 entries += tlb_miss;
802 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
803 *cursor_wm = entries + cursor->guard_size;
804 if (*cursor_wm > (int)cursor->max_wm)
805 *cursor_wm = (int)cursor->max_wm;
806
807 return true;
808}
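/*
 * Worked example added for clarity (hypothetical numbers): for the "small
 * buffer" plane watermark above, a 150000 kHz clock at 4 bytes per pixel
 * drains 150000 * 4 / 1000 = 600 bytes per microsecond, so a 5000 ns latency
 * needs 600 * 5000 / 1000 = 3000 bytes = DIV_ROUND_UP(3000, 64) = 47
 * cachelines plus the guard size; the tlb_miss term is only added when the
 * FIFO allocation exceeds 8 bytes per hdisplay pixel.
 */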
809
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
817static bool g4x_check_srwm(struct drm_device *dev,
818 int display_wm, int cursor_wm,
819 const struct intel_watermark_params *display,
820 const struct intel_watermark_params *cursor)
821{
822 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
823 display_wm, cursor_wm);
824
825 if (display_wm > display->max_wm) {
826 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
827 display_wm, display->max_wm);
828 return false;
829 }
830
831 if (cursor_wm > cursor->max_wm) {
832 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
833 cursor_wm, cursor->max_wm);
834 return false;
835 }
836
837 if (!(display_wm || cursor_wm)) {
838 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
839 return false;
840 }
841
842 return true;
843}
844
845static bool g4x_compute_srwm(struct drm_device *dev,
846 int plane,
847 int latency_ns,
848 const struct intel_watermark_params *display,
849 const struct intel_watermark_params *cursor,
850 int *display_wm, int *cursor_wm)
851{
852 struct drm_crtc *crtc;
4fe8590a 853 const struct drm_display_mode *adjusted_mode;
b445e3b0
ED
854 int hdisplay, htotal, pixel_size, clock;
855 unsigned long line_time_us;
856 int line_count, line_size;
857 int small, large;
858 int entries;
859
860 if (!latency_ns) {
861 *display_wm = *cursor_wm = 0;
862 return false;
863 }
864
865 crtc = intel_get_crtc_for_plane(dev, plane);
6e3c9717 866 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 867 clock = adjusted_mode->crtc_clock;
fec8cba3 868 htotal = adjusted_mode->crtc_htotal;
6e3c9717 869 hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
59bea882 870 pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
b445e3b0 871
922044c9 872 line_time_us = max(htotal * 1000 / clock, 1);
b445e3b0
ED
873 line_count = (latency_ns / line_time_us + 1000) / 1000;
874 line_size = hdisplay * pixel_size;
875
876 /* Use the minimum of the small and large buffer method for primary */
877 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
878 large = line_count * line_size;
879
880 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
881 *display_wm = entries + display->guard_size;
882
883 /* calculate the self-refresh watermark for display cursor */
3dd512fb 884 entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
b445e3b0
ED
885 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
886 *cursor_wm = entries + cursor->guard_size;
887
888 return g4x_check_srwm(dev,
889 *display_wm, *cursor_wm,
890 display, cursor);
891}
892
15665979
VS
893#define FW_WM_VLV(value, plane) \
894 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
895
0018fda1
VS
896static void vlv_write_wm_values(struct intel_crtc *crtc,
897 const struct vlv_wm_values *wm)
898{
899 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
900 enum pipe pipe = crtc->pipe;
901
902 I915_WRITE(VLV_DDL(pipe),
903 (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
904 (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
905 (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
906 (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
907
ae80152d 908 I915_WRITE(DSPFW1,
15665979
VS
909 FW_WM(wm->sr.plane, SR) |
910 FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
911 FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
912 FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
ae80152d 913 I915_WRITE(DSPFW2,
15665979
VS
914 FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
915 FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
916 FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
ae80152d 917 I915_WRITE(DSPFW3,
15665979 918 FW_WM(wm->sr.cursor, CURSOR_SR));
ae80152d
VS
919
920 if (IS_CHERRYVIEW(dev_priv)) {
921 I915_WRITE(DSPFW7_CHV,
15665979
VS
922 FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
923 FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
ae80152d 924 I915_WRITE(DSPFW8_CHV,
15665979
VS
925 FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
926 FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
ae80152d 927 I915_WRITE(DSPFW9_CHV,
15665979
VS
928 FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
929 FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
ae80152d 930 I915_WRITE(DSPHOWM,
15665979
VS
931 FW_WM(wm->sr.plane >> 9, SR_HI) |
932 FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
933 FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
934 FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
935 FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
936 FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
937 FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
938 FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
939 FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
940 FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
ae80152d
VS
941 } else {
942 I915_WRITE(DSPFW7,
15665979
VS
943 FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
944 FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
ae80152d 945 I915_WRITE(DSPHOWM,
15665979
VS
946 FW_WM(wm->sr.plane >> 9, SR_HI) |
947 FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
948 FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
949 FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
950 FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
951 FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
952 FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
ae80152d
VS
953 }
954
2cb389b7
VS
955 /* zero (unused) WM1 watermarks */
956 I915_WRITE(DSPFW4, 0);
957 I915_WRITE(DSPFW5, 0);
958 I915_WRITE(DSPFW6, 0);
959 I915_WRITE(DSPHOWM1, 0);
960
ae80152d 961 POSTING_READ(DSPFW1);
0018fda1
VS
962}
963
15665979
VS
964#undef FW_WM_VLV
965
6eb1a681
VS
966enum vlv_wm_level {
967 VLV_WM_LEVEL_PM2,
968 VLV_WM_LEVEL_PM5,
969 VLV_WM_LEVEL_DDR_DVFS,
970 CHV_WM_NUM_LEVELS,
971 VLV_WM_NUM_LEVELS = 1,
972};
973
262cd2e1
VS
974/* latency must be in 0.1us units. */
975static unsigned int vlv_wm_method2(unsigned int pixel_rate,
976 unsigned int pipe_htotal,
977 unsigned int horiz_pixels,
978 unsigned int bytes_per_pixel,
979 unsigned int latency)
980{
981 unsigned int ret;
982
983 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
984 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
985 ret = DIV_ROUND_UP(ret, 64);
986
987 return ret;
988}
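/*
 * Worked example added for clarity (hypothetical numbers): with latency = 330
 * (33 us, the DDR DVFS level), pixel_rate = 100000 kHz and pipe_htotal =
 * 2200, the first step gives (330 * 100000) / (2200 * 10000) = 1 full line of
 * latency. For a 1920 pixel wide plane at 4 bytes per pixel that becomes
 * (1 + 1) * 1920 * 4 = 15360 bytes, i.e. DIV_ROUND_UP(15360, 64) = 240 FIFO
 * cachelines.
 */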
989
990static void vlv_setup_wm_latency(struct drm_device *dev)
991{
992 struct drm_i915_private *dev_priv = dev->dev_private;
993
994 /* all latencies in usec */
995 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
996
997 if (IS_CHERRYVIEW(dev_priv)) {
998 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
999 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1000 }
1001}
1002
1003static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
1004 struct intel_crtc *crtc,
1005 const struct intel_plane_state *state,
1006 int level)
1007{
1008 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1009 int clock, htotal, pixel_size, width, wm;
1010
1011 if (dev_priv->wm.pri_latency[level] == 0)
1012 return USHRT_MAX;
1013
1014 if (!state->visible)
1015 return 0;
1016
1017 pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
1018 clock = crtc->config->base.adjusted_mode.crtc_clock;
1019 htotal = crtc->config->base.adjusted_mode.crtc_htotal;
1020 width = crtc->config->pipe_src_w;
1021 if (WARN_ON(htotal == 0))
1022 htotal = 1;
1023
1024 if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
1025 /*
1026 * FIXME the formula gives values that are
1027 * too big for the cursor FIFO, and hence we
1028 * would never be able to use cursors. For
1029 * now just hardcode the watermark.
1030 */
1031 wm = 63;
1032 } else {
1033 wm = vlv_wm_method2(clock, htotal, width, pixel_size,
1034 dev_priv->wm.pri_latency[level] * 10);
1035 }
1036
1037 return min_t(int, wm, USHRT_MAX);
1038}
1039
54f1b6e1
VS
1040static void vlv_compute_fifo(struct intel_crtc *crtc)
1041{
1042 struct drm_device *dev = crtc->base.dev;
1043 struct vlv_wm_state *wm_state = &crtc->wm_state;
1044 struct intel_plane *plane;
1045 unsigned int total_rate = 0;
1046 const int fifo_size = 512 - 1;
1047 int fifo_extra, fifo_left = fifo_size;
1048
1049 for_each_intel_plane_on_crtc(dev, crtc, plane) {
1050 struct intel_plane_state *state =
1051 to_intel_plane_state(plane->base.state);
1052
1053 if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
1054 continue;
1055
1056 if (state->visible) {
1057 wm_state->num_active_planes++;
1058 total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
1059 }
1060 }
1061
1062 for_each_intel_plane_on_crtc(dev, crtc, plane) {
1063 struct intel_plane_state *state =
1064 to_intel_plane_state(plane->base.state);
1065 unsigned int rate;
1066
1067 if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
1068 plane->wm.fifo_size = 63;
1069 continue;
1070 }
1071
1072 if (!state->visible) {
1073 plane->wm.fifo_size = 0;
1074 continue;
1075 }
1076
1077 rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
1078 plane->wm.fifo_size = fifo_size * rate / total_rate;
1079 fifo_left -= plane->wm.fifo_size;
1080 }
1081
1082 fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
1083
1084 /* spread the remainder evenly */
1085 for_each_intel_plane_on_crtc(dev, crtc, plane) {
1086 int plane_extra;
1087
1088 if (fifo_left == 0)
1089 break;
1090
1091 if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
1092 continue;
1093
1094 /* give it all to the first plane if none are active */
1095 if (plane->wm.fifo_size == 0 &&
1096 wm_state->num_active_planes)
1097 continue;
1098
1099 plane_extra = min(fifo_extra, fifo_left);
1100 plane->wm.fifo_size += plane_extra;
1101 fifo_left -= plane_extra;
1102 }
1103
1104 WARN_ON(fifo_left != 0);
1105}
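/*
 * Worked example added for clarity (hypothetical numbers): with the primary
 * plane at 4 bytes per pixel and one sprite at 2 bytes per pixel visible,
 * total_rate is 6, so of the 511 usable entries the primary initially gets
 * 511 * 4 / 6 = 340 and the sprite 511 * 2 / 6 = 170. The single leftover
 * entry is then handed out by the final loop to whichever eligible plane it
 * visits first, giving e.g. a 341 / 170 split; the cursor is handled
 * separately with its fixed 63 entries.
 */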
1106
262cd2e1
VS
1107static void vlv_invert_wms(struct intel_crtc *crtc)
1108{
1109 struct vlv_wm_state *wm_state = &crtc->wm_state;
1110 int level;
1111
1112 for (level = 0; level < wm_state->num_levels; level++) {
1113 struct drm_device *dev = crtc->base.dev;
1114 const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
1115 struct intel_plane *plane;
1116
1117 wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
1118 wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
1119
1120 for_each_intel_plane_on_crtc(dev, crtc, plane) {
1121 switch (plane->base.type) {
1122 int sprite;
1123 case DRM_PLANE_TYPE_CURSOR:
1124 wm_state->wm[level].cursor = plane->wm.fifo_size -
1125 wm_state->wm[level].cursor;
1126 break;
1127 case DRM_PLANE_TYPE_PRIMARY:
1128 wm_state->wm[level].primary = plane->wm.fifo_size -
1129 wm_state->wm[level].primary;
1130 break;
1131 case DRM_PLANE_TYPE_OVERLAY:
1132 sprite = plane->plane;
1133 wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
1134 wm_state->wm[level].sprite[sprite];
1135 break;
1136 }
1137 }
1138 }
1139}
1140
26e1fe4f 1141static void vlv_compute_wm(struct intel_crtc *crtc)
262cd2e1
VS
1142{
1143 struct drm_device *dev = crtc->base.dev;
1144 struct vlv_wm_state *wm_state = &crtc->wm_state;
1145 struct intel_plane *plane;
1146 int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
1147 int level;
1148
1149 memset(wm_state, 0, sizeof(*wm_state));
1150
852eb00d 1151 wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
262cd2e1
VS
1152 if (IS_CHERRYVIEW(dev))
1153 wm_state->num_levels = CHV_WM_NUM_LEVELS;
1154 else
1155 wm_state->num_levels = VLV_WM_NUM_LEVELS;
1156
1157 wm_state->num_active_planes = 0;
262cd2e1 1158
54f1b6e1 1159 vlv_compute_fifo(crtc);
262cd2e1
VS
1160
1161 if (wm_state->num_active_planes != 1)
1162 wm_state->cxsr = false;
1163
1164 if (wm_state->cxsr) {
1165 for (level = 0; level < wm_state->num_levels; level++) {
1166 wm_state->sr[level].plane = sr_fifo_size;
1167 wm_state->sr[level].cursor = 63;
1168 }
1169 }
1170
1171 for_each_intel_plane_on_crtc(dev, crtc, plane) {
1172 struct intel_plane_state *state =
1173 to_intel_plane_state(plane->base.state);
1174
1175 if (!state->visible)
1176 continue;
1177
1178 /* normal watermarks */
1179 for (level = 0; level < wm_state->num_levels; level++) {
1180 int wm = vlv_compute_wm_level(plane, crtc, state, level);
1181 int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
1182
1183 /* hack */
1184 if (WARN_ON(level == 0 && wm > max_wm))
1185 wm = max_wm;
1186
1187 if (wm > plane->wm.fifo_size)
1188 break;
1189
1190 switch (plane->base.type) {
1191 int sprite;
1192 case DRM_PLANE_TYPE_CURSOR:
1193 wm_state->wm[level].cursor = wm;
1194 break;
1195 case DRM_PLANE_TYPE_PRIMARY:
1196 wm_state->wm[level].primary = wm;
1197 break;
1198 case DRM_PLANE_TYPE_OVERLAY:
1199 sprite = plane->plane;
1200 wm_state->wm[level].sprite[sprite] = wm;
1201 break;
1202 }
1203 }
1204
1205 wm_state->num_levels = level;
1206
1207 if (!wm_state->cxsr)
1208 continue;
1209
1210 /* maxfifo watermarks */
1211 switch (plane->base.type) {
1212 int sprite, level;
1213 case DRM_PLANE_TYPE_CURSOR:
1214 for (level = 0; level < wm_state->num_levels; level++)
1215 wm_state->sr[level].cursor =
1216 wm_state->sr[level].cursor;
1217 break;
1218 case DRM_PLANE_TYPE_PRIMARY:
1219 for (level = 0; level < wm_state->num_levels; level++)
1220 wm_state->sr[level].plane =
1221 min(wm_state->sr[level].plane,
1222 wm_state->wm[level].primary);
1223 break;
1224 case DRM_PLANE_TYPE_OVERLAY:
1225 sprite = plane->plane;
1226 for (level = 0; level < wm_state->num_levels; level++)
1227 wm_state->sr[level].plane =
1228 min(wm_state->sr[level].plane,
1229 wm_state->wm[level].sprite[sprite]);
1230 break;
1231 }
1232 }
1233
1234 /* clear any (partially) filled invalid levels */
1235 for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
1236 memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
1237 memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
1238 }
1239
1240 vlv_invert_wms(crtc);
1241}
1242
54f1b6e1
VS
1243#define VLV_FIFO(plane, value) \
1244 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1245
1246static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
1247{
1248 struct drm_device *dev = crtc->base.dev;
1249 struct drm_i915_private *dev_priv = to_i915(dev);
1250 struct intel_plane *plane;
1251 int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
1252
1253 for_each_intel_plane_on_crtc(dev, crtc, plane) {
1254 if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
1255 WARN_ON(plane->wm.fifo_size != 63);
1256 continue;
1257 }
1258
1259 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
1260 sprite0_start = plane->wm.fifo_size;
1261 else if (plane->plane == 0)
1262 sprite1_start = sprite0_start + plane->wm.fifo_size;
1263 else
1264 fifo_size = sprite1_start + plane->wm.fifo_size;
1265 }
1266
1267 WARN_ON(fifo_size != 512 - 1);
1268
1269 DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
1270 pipe_name(crtc->pipe), sprite0_start,
1271 sprite1_start, fifo_size);
1272
1273 switch (crtc->pipe) {
1274 uint32_t dsparb, dsparb2, dsparb3;
1275 case PIPE_A:
1276 dsparb = I915_READ(DSPARB);
1277 dsparb2 = I915_READ(DSPARB2);
1278
1279 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1280 VLV_FIFO(SPRITEB, 0xff));
1281 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1282 VLV_FIFO(SPRITEB, sprite1_start));
1283
1284 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1285 VLV_FIFO(SPRITEB_HI, 0x1));
1286 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1287 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1288
1289 I915_WRITE(DSPARB, dsparb);
1290 I915_WRITE(DSPARB2, dsparb2);
1291 break;
1292 case PIPE_B:
1293 dsparb = I915_READ(DSPARB);
1294 dsparb2 = I915_READ(DSPARB2);
1295
1296 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
1297 VLV_FIFO(SPRITED, 0xff));
1298 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
1299 VLV_FIFO(SPRITED, sprite1_start));
1300
1301 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
1302 VLV_FIFO(SPRITED_HI, 0xff));
1303 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
1304 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
1305
1306 I915_WRITE(DSPARB, dsparb);
1307 I915_WRITE(DSPARB2, dsparb2);
1308 break;
1309 case PIPE_C:
1310 dsparb3 = I915_READ(DSPARB3);
1311 dsparb2 = I915_READ(DSPARB2);
1312
1313 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
1314 VLV_FIFO(SPRITEF, 0xff));
1315 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
1316 VLV_FIFO(SPRITEF, sprite1_start));
1317
1318 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
1319 VLV_FIFO(SPRITEF_HI, 0xff));
1320 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
1321 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
1322
1323 I915_WRITE(DSPARB3, dsparb3);
1324 I915_WRITE(DSPARB2, dsparb2);
1325 break;
1326 default:
1327 break;
1328 }
1329}
1330
1331#undef VLV_FIFO
1332
262cd2e1
VS
1333static void vlv_merge_wm(struct drm_device *dev,
1334 struct vlv_wm_values *wm)
1335{
1336 struct intel_crtc *crtc;
1337 int num_active_crtcs = 0;
1338
1339 if (IS_CHERRYVIEW(dev))
1340 wm->level = VLV_WM_LEVEL_DDR_DVFS;
1341 else
1342 wm->level = VLV_WM_LEVEL_PM2;
1343 wm->cxsr = true;
1344
1345 for_each_intel_crtc(dev, crtc) {
1346 const struct vlv_wm_state *wm_state = &crtc->wm_state;
1347
1348 if (!crtc->active)
1349 continue;
1350
1351 if (!wm_state->cxsr)
1352 wm->cxsr = false;
1353
1354 num_active_crtcs++;
1355 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
1356 }
1357
1358 if (num_active_crtcs != 1)
1359 wm->cxsr = false;
1360
6f9c784b
VS
1361 if (num_active_crtcs > 1)
1362 wm->level = VLV_WM_LEVEL_PM2;
1363
262cd2e1
VS
1364 for_each_intel_crtc(dev, crtc) {
1365 struct vlv_wm_state *wm_state = &crtc->wm_state;
1366 enum pipe pipe = crtc->pipe;
1367
1368 if (!crtc->active)
1369 continue;
1370
1371 wm->pipe[pipe] = wm_state->wm[wm->level];
1372 if (wm->cxsr)
1373 wm->sr = wm_state->sr[wm->level];
1374
1375 wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
1376 wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
1377 wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
1378 wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
1379 }
1380}
1381
1382static void vlv_update_wm(struct drm_crtc *crtc)
1383{
1384 struct drm_device *dev = crtc->dev;
1385 struct drm_i915_private *dev_priv = dev->dev_private;
1386 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1387 enum pipe pipe = intel_crtc->pipe;
1388 struct vlv_wm_values wm = {};
1389
26e1fe4f 1390 vlv_compute_wm(intel_crtc);
262cd2e1
VS
1391 vlv_merge_wm(dev, &wm);
1392
54f1b6e1
VS
1393 if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
1394 /* FIXME should be part of crtc atomic commit */
1395 vlv_pipe_set_fifo_size(intel_crtc);
262cd2e1 1396 return;
54f1b6e1 1397 }
262cd2e1
VS
1398
1399 if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
1400 dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
1401 chv_set_memory_dvfs(dev_priv, false);
1402
1403 if (wm.level < VLV_WM_LEVEL_PM5 &&
1404 dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
1405 chv_set_memory_pm5(dev_priv, false);
1406
852eb00d 1407 if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
262cd2e1 1408 intel_set_memory_cxsr(dev_priv, false);
262cd2e1 1409
54f1b6e1
VS
1410 /* FIXME should be part of crtc atomic commit */
1411 vlv_pipe_set_fifo_size(intel_crtc);
1412
262cd2e1
VS
1413 vlv_write_wm_values(intel_crtc, &wm);
1414
1415 DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
1416 "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
1417 pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
1418 wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
1419 wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
1420
852eb00d 1421 if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
262cd2e1 1422 intel_set_memory_cxsr(dev_priv, true);
262cd2e1
VS
1423
1424 if (wm.level >= VLV_WM_LEVEL_PM5 &&
1425 dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
1426 chv_set_memory_pm5(dev_priv, true);
1427
1428 if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
1429 dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
1430 chv_set_memory_dvfs(dev_priv, true);
1431
1432 dev_priv->wm.vlv = wm;
3c2777fd
VS
1433}
1434
ae80152d
VS
1435#define single_plane_enabled(mask) is_power_of_2(mask)
1436
46ba614c 1437static void g4x_update_wm(struct drm_crtc *crtc)
b445e3b0 1438{
46ba614c 1439 struct drm_device *dev = crtc->dev;
b445e3b0
ED
1440 static const int sr_latency_ns = 12000;
1441 struct drm_i915_private *dev_priv = dev->dev_private;
1442 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1443 int plane_sr, cursor_sr;
1444 unsigned int enabled = 0;
9858425c 1445 bool cxsr_enabled;
b445e3b0 1446
51cea1f4 1447 if (g4x_compute_wm0(dev, PIPE_A,
5aef6003
CW
1448 &g4x_wm_info, pessimal_latency_ns,
1449 &g4x_cursor_wm_info, pessimal_latency_ns,
b445e3b0 1450 &planea_wm, &cursora_wm))
51cea1f4 1451 enabled |= 1 << PIPE_A;
b445e3b0 1452
51cea1f4 1453 if (g4x_compute_wm0(dev, PIPE_B,
5aef6003
CW
1454 &g4x_wm_info, pessimal_latency_ns,
1455 &g4x_cursor_wm_info, pessimal_latency_ns,
b445e3b0 1456 &planeb_wm, &cursorb_wm))
51cea1f4 1457 enabled |= 1 << PIPE_B;
b445e3b0 1458
b445e3b0
ED
1459 if (single_plane_enabled(enabled) &&
1460 g4x_compute_srwm(dev, ffs(enabled) - 1,
1461 sr_latency_ns,
1462 &g4x_wm_info,
1463 &g4x_cursor_wm_info,
52bd02d8 1464 &plane_sr, &cursor_sr)) {
9858425c 1465 cxsr_enabled = true;
52bd02d8 1466 } else {
9858425c 1467 cxsr_enabled = false;
5209b1f4 1468 intel_set_memory_cxsr(dev_priv, false);
52bd02d8
CW
1469 plane_sr = cursor_sr = 0;
1470 }
b445e3b0 1471
a5043453
VS
1472 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1473 "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
b445e3b0
ED
1474 planea_wm, cursora_wm,
1475 planeb_wm, cursorb_wm,
1476 plane_sr, cursor_sr);
1477
1478 I915_WRITE(DSPFW1,
f4998963
VS
1479 FW_WM(plane_sr, SR) |
1480 FW_WM(cursorb_wm, CURSORB) |
1481 FW_WM(planeb_wm, PLANEB) |
1482 FW_WM(planea_wm, PLANEA));
b445e3b0 1483 I915_WRITE(DSPFW2,
8c919b28 1484 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
f4998963 1485 FW_WM(cursora_wm, CURSORA));
b445e3b0
ED
1486 /* HPLL off in SR has some issues on G4x... disable it */
1487 I915_WRITE(DSPFW3,
8c919b28 1488 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
f4998963 1489 FW_WM(cursor_sr, CURSOR_SR));
9858425c
ID
1490
1491 if (cxsr_enabled)
1492 intel_set_memory_cxsr(dev_priv, true);
b445e3b0
ED
1493}
1494
46ba614c 1495static void i965_update_wm(struct drm_crtc *unused_crtc)
b445e3b0 1496{
46ba614c 1497 struct drm_device *dev = unused_crtc->dev;
b445e3b0
ED
1498 struct drm_i915_private *dev_priv = dev->dev_private;
1499 struct drm_crtc *crtc;
1500 int srwm = 1;
1501 int cursor_sr = 16;
9858425c 1502 bool cxsr_enabled;
b445e3b0
ED
1503
1504 /* Calc sr entries for one plane configs */
1505 crtc = single_enabled_crtc(dev);
1506 if (crtc) {
1507 /* self-refresh has much higher latency */
1508 static const int sr_latency_ns = 12000;
4fe8590a 1509 const struct drm_display_mode *adjusted_mode =
6e3c9717 1510 &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 1511 int clock = adjusted_mode->crtc_clock;
fec8cba3 1512 int htotal = adjusted_mode->crtc_htotal;
6e3c9717 1513 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
59bea882 1514 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
b445e3b0
ED
1515 unsigned long line_time_us;
1516 int entries;
1517
922044c9 1518 line_time_us = max(htotal * 1000 / clock, 1);
b445e3b0
ED
1519
1520 /* Use ns/us then divide to preserve precision */
1521 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1522 pixel_size * hdisplay;
1523 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1524 srwm = I965_FIFO_SIZE - entries;
1525 if (srwm < 0)
1526 srwm = 1;
1527 srwm &= 0x1ff;
1528 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1529 entries, srwm);
1530
1531 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3dd512fb 1532 pixel_size * crtc->cursor->state->crtc_w;
b445e3b0
ED
1533 entries = DIV_ROUND_UP(entries,
1534 i965_cursor_wm_info.cacheline_size);
1535 cursor_sr = i965_cursor_wm_info.fifo_size -
1536 (entries + i965_cursor_wm_info.guard_size);
1537
1538 if (cursor_sr > i965_cursor_wm_info.max_wm)
1539 cursor_sr = i965_cursor_wm_info.max_wm;
1540
1541 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1542 "cursor %d\n", srwm, cursor_sr);
1543
9858425c 1544 cxsr_enabled = true;
b445e3b0 1545 } else {
9858425c 1546 cxsr_enabled = false;
b445e3b0 1547 /* Turn off self refresh if both pipes are enabled */
5209b1f4 1548 intel_set_memory_cxsr(dev_priv, false);
b445e3b0
ED
1549 }
1550
1551 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1552 srwm);
1553
1554 /* 965 has limitations... */
f4998963
VS
1555 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
1556 FW_WM(8, CURSORB) |
1557 FW_WM(8, PLANEB) |
1558 FW_WM(8, PLANEA));
1559 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
1560 FW_WM(8, PLANEC_OLD));
b445e3b0 1561 /* update cursor SR watermark */
f4998963 1562 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
9858425c
ID
1563
1564 if (cxsr_enabled)
1565 intel_set_memory_cxsr(dev_priv, true);
b445e3b0
ED
1566}
1567
f4998963
VS
1568#undef FW_WM
1569
46ba614c 1570static void i9xx_update_wm(struct drm_crtc *unused_crtc)
b445e3b0 1571{
46ba614c 1572 struct drm_device *dev = unused_crtc->dev;
b445e3b0
ED
1573 struct drm_i915_private *dev_priv = dev->dev_private;
1574 const struct intel_watermark_params *wm_info;
1575 uint32_t fwater_lo;
1576 uint32_t fwater_hi;
1577 int cwm, srwm = 1;
1578 int fifo_size;
1579 int planea_wm, planeb_wm;
1580 struct drm_crtc *crtc, *enabled = NULL;
1581
1582 if (IS_I945GM(dev))
1583 wm_info = &i945_wm_info;
1584 else if (!IS_GEN2(dev))
1585 wm_info = &i915_wm_info;
1586 else
9d539105 1587 wm_info = &i830_a_wm_info;
b445e3b0
ED
1588
1589 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1590 crtc = intel_get_crtc_for_plane(dev, 0);
3490ea5d 1591 if (intel_crtc_active(crtc)) {
241bfc38 1592 const struct drm_display_mode *adjusted_mode;
59bea882 1593 int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
b9e0bda3
CW
1594 if (IS_GEN2(dev))
1595 cpp = 4;
1596
6e3c9717 1597 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 1598 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
b9e0bda3 1599 wm_info, fifo_size, cpp,
5aef6003 1600 pessimal_latency_ns);
b445e3b0 1601 enabled = crtc;
9d539105 1602 } else {
b445e3b0 1603 planea_wm = fifo_size - wm_info->guard_size;
9d539105
VS
1604 if (planea_wm > (long)wm_info->max_wm)
1605 planea_wm = wm_info->max_wm;
1606 }
1607
1608 if (IS_GEN2(dev))
1609 wm_info = &i830_bc_wm_info;
b445e3b0
ED
1610
1611 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1612 crtc = intel_get_crtc_for_plane(dev, 1);
3490ea5d 1613 if (intel_crtc_active(crtc)) {
241bfc38 1614 const struct drm_display_mode *adjusted_mode;
59bea882 1615 int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
b9e0bda3
CW
1616 if (IS_GEN2(dev))
1617 cpp = 4;
1618
6e3c9717 1619 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 1620 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
b9e0bda3 1621 wm_info, fifo_size, cpp,
5aef6003 1622 pessimal_latency_ns);
b445e3b0
ED
1623 if (enabled == NULL)
1624 enabled = crtc;
1625 else
1626 enabled = NULL;
9d539105 1627 } else {
b445e3b0 1628 planeb_wm = fifo_size - wm_info->guard_size;
9d539105
VS
1629 if (planeb_wm > (long)wm_info->max_wm)
1630 planeb_wm = wm_info->max_wm;
1631 }
b445e3b0
ED
1632
1633 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1634
2ab1bc9d 1635 if (IS_I915GM(dev) && enabled) {
2ff8fde1 1636 struct drm_i915_gem_object *obj;
2ab1bc9d 1637
59bea882 1638 obj = intel_fb_obj(enabled->primary->state->fb);
2ab1bc9d
DV
1639
1640 /* self-refresh seems busted with untiled */
2ff8fde1 1641 if (obj->tiling_mode == I915_TILING_NONE)
2ab1bc9d
DV
1642 enabled = NULL;
1643 }
1644
b445e3b0
ED
1645 /*
1646 * Overlay gets an aggressive default since video jitter is bad.
1647 */
1648 cwm = 2;
1649
1650 /* Play safe and disable self-refresh before adjusting watermarks. */
5209b1f4 1651 intel_set_memory_cxsr(dev_priv, false);
b445e3b0
ED
1652
1653 /* Calc sr entries for one plane configs */
1654 if (HAS_FW_BLC(dev) && enabled) {
1655 /* self-refresh has much higher latency */
1656 static const int sr_latency_ns = 6000;
4fe8590a 1657 const struct drm_display_mode *adjusted_mode =
6e3c9717 1658 &to_intel_crtc(enabled)->config->base.adjusted_mode;
241bfc38 1659 int clock = adjusted_mode->crtc_clock;
fec8cba3 1660 int htotal = adjusted_mode->crtc_htotal;
6e3c9717 1661 int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
59bea882 1662 int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
b445e3b0
ED
1663 unsigned long line_time_us;
1664 int entries;
1665
922044c9 1666 line_time_us = max(htotal * 1000 / clock, 1);
b445e3b0
ED
1667
1668 /* Use ns/us then divide to preserve precision */
1669 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1670 pixel_size * hdisplay;
1671 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1672 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1673 srwm = wm_info->fifo_size - entries;
1674 if (srwm < 0)
1675 srwm = 1;
1676
1677 if (IS_I945G(dev) || IS_I945GM(dev))
1678 I915_WRITE(FW_BLC_SELF,
1679 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1680 else if (IS_I915GM(dev))
1681 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1682 }
1683
1684 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1685 planea_wm, planeb_wm, cwm, srwm);
1686
1687 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1688 fwater_hi = (cwm & 0x1f);
1689
1690 /* Set request length to 8 cachelines per fetch */
1691 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1692 fwater_hi = fwater_hi | (1 << 8);
1693
1694 I915_WRITE(FW_BLC, fwater_lo);
1695 I915_WRITE(FW_BLC2, fwater_hi);
1696
5209b1f4
ID
1697 if (enabled)
1698 intel_set_memory_cxsr(dev_priv, true);
b445e3b0
ED
1699}
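/*
 * Worked example of the self-refresh entry calculation above, using
 * assumed numbers only (a 1024x768@60 mode: 65000 kHz dot clock,
 * htotal 1344, 32bpp, and an assumed 64-byte FIFO cacheline):
 *   line_time_us = 1344 * 1000 / 65000         = 20
 *   lines        = ((6000 / 20) + 1000) / 1000 = 1
 *   entries      = 1 * 4 * 1024                = 4096 bytes
 *   entries      = DIV_ROUND_UP(4096, 64)      = 64 cachelines
 *   srwm         = wm_info->fifo_size - 64
 */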
1700
feb56b93 1701static void i845_update_wm(struct drm_crtc *unused_crtc)
b445e3b0 1702{
46ba614c 1703 struct drm_device *dev = unused_crtc->dev;
b445e3b0
ED
1704 struct drm_i915_private *dev_priv = dev->dev_private;
1705 struct drm_crtc *crtc;
241bfc38 1706 const struct drm_display_mode *adjusted_mode;
b445e3b0
ED
1707 uint32_t fwater_lo;
1708 int planea_wm;
1709
1710 crtc = single_enabled_crtc(dev);
1711 if (crtc == NULL)
1712 return;
1713
6e3c9717 1714 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
241bfc38 1715 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
feb56b93 1716 &i845_wm_info,
b445e3b0 1717 dev_priv->display.get_fifo_size(dev, 0),
5aef6003 1718 4, pessimal_latency_ns);
b445e3b0
ED
1719 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1720 fwater_lo |= (3<<8) | planea_wm;
1721
1722 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1723
1724 I915_WRITE(FW_BLC, fwater_lo);
1725}
1726
8cfb3407 1727uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
801bcfff 1728{
fd4daa9c 1729 uint32_t pixel_rate;
801bcfff 1730
8cfb3407 1731 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
801bcfff
PZ
1732
1733 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1734 * adjust the pixel_rate here. */
1735
8cfb3407 1736 if (pipe_config->pch_pfit.enabled) {
801bcfff 1737 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
8cfb3407
VS
1738 uint32_t pfit_size = pipe_config->pch_pfit.size;
1739
1740 pipe_w = pipe_config->pipe_src_w;
1741 pipe_h = pipe_config->pipe_src_h;
801bcfff 1742
801bcfff
PZ
1743 pfit_w = (pfit_size >> 16) & 0xFFFF;
1744 pfit_h = pfit_size & 0xFFFF;
1745 if (pipe_w < pfit_w)
1746 pipe_w = pfit_w;
1747 if (pipe_h < pfit_h)
1748 pipe_h = pfit_h;
1749
1750 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1751 pfit_w * pfit_h);
1752 }
1753
1754 return pixel_rate;
1755}
1756
37126462 1757/* latency must be in 0.1us units. */
23297044 1758static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
801bcfff
PZ
1759 uint32_t latency)
1760{
1761 uint64_t ret;
1762
3312ba65
VS
1763 if (WARN(latency == 0, "Latency value missing\n"))
1764 return UINT_MAX;
1765
801bcfff
PZ
1766 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
1767 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1768
1769 return ret;
1770}
1771
37126462 1772/* latency must be in 0.1us units. */
23297044 1773static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
801bcfff
PZ
1774 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
1775 uint32_t latency)
1776{
1777 uint32_t ret;
1778
3312ba65
VS
1779 if (WARN(latency == 0, "Latency value missing\n"))
1780 return UINT_MAX;
1781
801bcfff
PZ
1782 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1783 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
1784 ret = DIV_ROUND_UP(ret, 64) + 2;
1785 return ret;
1786}
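/*
 * Worked example of the two methods above, with assumed numbers only
 * (1920x1080@60: 148500 kHz pixel rate, htotal 2200, 32bpp, and a
 * hypothetical 30us latency, i.e. 300 in 0.1us units):
 *   method1 = DIV_ROUND_UP(148500 * 4 * 300, 64 * 10000) + 2 = 281
 *   method2 = ((300 * 148500) / (2200 * 10000) + 1) * 1920 * 4 = 23040,
 *             DIV_ROUND_UP(23040, 64) + 2                      = 362
 * Both results are in 64-byte units; ilk_compute_pri_wm() below takes
 * min(method1, method2) for LP watermarks, i.e. 281 here.
 */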
1787
23297044 1788static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
cca32e9a
PZ
1789 uint8_t bytes_per_pixel)
1790{
1791 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1792}
1793
2ac96d2a
PB
1794struct skl_pipe_wm_parameters {
1795 bool active;
1796 uint32_t pipe_htotal;
1797 uint32_t pixel_rate; /* in KHz */
1798 struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
1799 struct intel_plane_wm_parameters cursor;
1800};
1801
820c1980 1802struct ilk_pipe_wm_parameters {
801bcfff 1803 bool active;
801bcfff
PZ
1804 uint32_t pipe_htotal;
1805 uint32_t pixel_rate;
c35426d2
VS
1806 struct intel_plane_wm_parameters pri;
1807 struct intel_plane_wm_parameters spr;
1808 struct intel_plane_wm_parameters cur;
801bcfff
PZ
1809};
1810
820c1980 1811struct ilk_wm_maximums {
cca32e9a
PZ
1812 uint16_t pri;
1813 uint16_t spr;
1814 uint16_t cur;
1815 uint16_t fbc;
1816};
1817
240264f4
VS
1818/* used in computing the new watermarks state */
1819struct intel_wm_config {
1820 unsigned int num_pipes_active;
1821 bool sprites_enabled;
1822 bool sprites_scaled;
240264f4
VS
1823};
1824
37126462
VS
1825/*
1826 * For both WM_PIPE and WM_LP.
1827 * mem_value must be in 0.1us units.
1828 */
820c1980 1829static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
cca32e9a
PZ
1830 uint32_t mem_value,
1831 bool is_lp)
801bcfff 1832{
cca32e9a
PZ
1833 uint32_t method1, method2;
1834
c35426d2 1835 if (!params->active || !params->pri.enabled)
801bcfff
PZ
1836 return 0;
1837
23297044 1838 method1 = ilk_wm_method1(params->pixel_rate,
c35426d2 1839 params->pri.bytes_per_pixel,
cca32e9a
PZ
1840 mem_value);
1841
1842 if (!is_lp)
1843 return method1;
1844
23297044 1845 method2 = ilk_wm_method2(params->pixel_rate,
cca32e9a 1846 params->pipe_htotal,
c35426d2
VS
1847 params->pri.horiz_pixels,
1848 params->pri.bytes_per_pixel,
cca32e9a
PZ
1849 mem_value);
1850
1851 return min(method1, method2);
801bcfff
PZ
1852}
1853
37126462
VS
1854/*
1855 * For both WM_PIPE and WM_LP.
1856 * mem_value must be in 0.1us units.
1857 */
820c1980 1858static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
801bcfff
PZ
1859 uint32_t mem_value)
1860{
1861 uint32_t method1, method2;
1862
c35426d2 1863 if (!params->active || !params->spr.enabled)
801bcfff
PZ
1864 return 0;
1865
23297044 1866 method1 = ilk_wm_method1(params->pixel_rate,
c35426d2 1867 params->spr.bytes_per_pixel,
801bcfff 1868 mem_value);
23297044 1869 method2 = ilk_wm_method2(params->pixel_rate,
801bcfff 1870 params->pipe_htotal,
c35426d2
VS
1871 params->spr.horiz_pixels,
1872 params->spr.bytes_per_pixel,
801bcfff
PZ
1873 mem_value);
1874 return min(method1, method2);
1875}
1876
37126462
VS
1877/*
1878 * For both WM_PIPE and WM_LP.
1879 * mem_value must be in 0.1us units.
1880 */
820c1980 1881static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
801bcfff
PZ
1882 uint32_t mem_value)
1883{
c35426d2 1884 if (!params->active || !params->cur.enabled)
801bcfff
PZ
1885 return 0;
1886
23297044 1887 return ilk_wm_method2(params->pixel_rate,
801bcfff 1888 params->pipe_htotal,
c35426d2
VS
1889 params->cur.horiz_pixels,
1890 params->cur.bytes_per_pixel,
801bcfff
PZ
1891 mem_value);
1892}
1893
cca32e9a 1894/* Only for WM_LP. */
820c1980 1895static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
1fda9882 1896 uint32_t pri_val)
cca32e9a 1897{
c35426d2 1898 if (!params->active || !params->pri.enabled)
cca32e9a
PZ
1899 return 0;
1900
23297044 1901 return ilk_wm_fbc(pri_val,
c35426d2
VS
1902 params->pri.horiz_pixels,
1903 params->pri.bytes_per_pixel);
cca32e9a
PZ
1904}
1905
158ae64f
VS
1906static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1907{
416f4727
VS
1908 if (INTEL_INFO(dev)->gen >= 8)
1909 return 3072;
1910 else if (INTEL_INFO(dev)->gen >= 7)
158ae64f
VS
1911 return 768;
1912 else
1913 return 512;
1914}
1915
4e975081
VS
1916static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1917 int level, bool is_sprite)
1918{
1919 if (INTEL_INFO(dev)->gen >= 8)
1920 /* BDW primary/sprite plane watermarks */
1921 return level == 0 ? 255 : 2047;
1922 else if (INTEL_INFO(dev)->gen >= 7)
1923 /* IVB/HSW primary/sprite plane watermarks */
1924 return level == 0 ? 127 : 1023;
1925 else if (!is_sprite)
1926 /* ILK/SNB primary plane watermarks */
1927 return level == 0 ? 127 : 511;
1928 else
1929 /* ILK/SNB sprite plane watermarks */
1930 return level == 0 ? 63 : 255;
1931}
1932
1933static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1934 int level)
1935{
1936 if (INTEL_INFO(dev)->gen >= 7)
1937 return level == 0 ? 63 : 255;
1938 else
1939 return level == 0 ? 31 : 63;
1940}
1941
1942static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1943{
1944 if (INTEL_INFO(dev)->gen >= 8)
1945 return 31;
1946 else
1947 return 15;
1948}
1949
158ae64f
VS
1950/* Calculate the maximum primary/sprite plane watermark */
1951static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1952 int level,
240264f4 1953 const struct intel_wm_config *config,
158ae64f
VS
1954 enum intel_ddb_partitioning ddb_partitioning,
1955 bool is_sprite)
1956{
1957 unsigned int fifo_size = ilk_display_fifo_size(dev);
158ae64f
VS
1958
1959 /* if sprites aren't enabled, sprites get nothing */
240264f4 1960 if (is_sprite && !config->sprites_enabled)
158ae64f
VS
1961 return 0;
1962
1963 /* HSW allows LP1+ watermarks even with multiple pipes */
240264f4 1964 if (level == 0 || config->num_pipes_active > 1) {
158ae64f
VS
1965 fifo_size /= INTEL_INFO(dev)->num_pipes;
1966
1967 /*
1968 * For some reason the non self refresh
1969 * FIFO size is only half of the self
1970 * refresh FIFO size on ILK/SNB.
1971 */
1972 if (INTEL_INFO(dev)->gen <= 6)
1973 fifo_size /= 2;
1974 }
1975
240264f4 1976 if (config->sprites_enabled) {
158ae64f
VS
1977 /* level 0 is always calculated with 1:1 split */
1978 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1979 if (is_sprite)
1980 fifo_size *= 5;
1981 fifo_size /= 6;
1982 } else {
1983 fifo_size /= 2;
1984 }
1985 }
1986
1987 /* clamp to max that the registers can hold */
4e975081 1988 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
158ae64f
VS
1989}
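/*
 * Illustrative split with assumed numbers: on a gen7 part (768 block
 * FIFO), for an LP level with a single active pipe and sprites enabled,
 * INTEL_DDB_PART_5_6 gives the primary 768 / 6 = 128 and the sprite
 * 768 * 5 / 6 = 640, while INTEL_DDB_PART_1_2 gives each 768 / 2 = 384;
 * either way the result is clamped to ilk_plane_wm_reg_max().
 */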
1990
1991/* Calculate the maximum cursor plane watermark */
1992static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
240264f4
VS
1993 int level,
1994 const struct intel_wm_config *config)
158ae64f
VS
1995{
1996 /* HSW LP1+ watermarks w/ multiple pipes */
240264f4 1997 if (level > 0 && config->num_pipes_active > 1)
158ae64f
VS
1998 return 64;
1999
2000 /* otherwise just report max that registers can hold */
4e975081 2001 return ilk_cursor_wm_reg_max(dev, level);
158ae64f
VS
2002}
2003
d34ff9c6 2004static void ilk_compute_wm_maximums(const struct drm_device *dev,
34982fe1
VS
2005 int level,
2006 const struct intel_wm_config *config,
2007 enum intel_ddb_partitioning ddb_partitioning,
820c1980 2008 struct ilk_wm_maximums *max)
158ae64f 2009{
240264f4
VS
2010 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2011 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2012 max->cur = ilk_cursor_wm_max(dev, level, config);
4e975081 2013 max->fbc = ilk_fbc_wm_reg_max(dev);
158ae64f
VS
2014}
2015
a3cb4048
VS
2016static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
2017 int level,
2018 struct ilk_wm_maximums *max)
2019{
2020 max->pri = ilk_plane_wm_reg_max(dev, level, false);
2021 max->spr = ilk_plane_wm_reg_max(dev, level, true);
2022 max->cur = ilk_cursor_wm_reg_max(dev, level);
2023 max->fbc = ilk_fbc_wm_reg_max(dev);
2024}
2025
d9395655 2026static bool ilk_validate_wm_level(int level,
820c1980 2027 const struct ilk_wm_maximums *max,
d9395655 2028 struct intel_wm_level *result)
a9786a11
VS
2029{
2030 bool ret;
2031
2032 /* already determined to be invalid? */
2033 if (!result->enable)
2034 return false;
2035
2036 result->enable = result->pri_val <= max->pri &&
2037 result->spr_val <= max->spr &&
2038 result->cur_val <= max->cur;
2039
2040 ret = result->enable;
2041
2042 /*
2043 * HACK until we can pre-compute everything,
2044 * and thus fail gracefully if LP0 watermarks
2045 * are exceeded...
2046 */
2047 if (level == 0 && !result->enable) {
2048 if (result->pri_val > max->pri)
2049 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2050 level, result->pri_val, max->pri);
2051 if (result->spr_val > max->spr)
2052 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2053 level, result->spr_val, max->spr);
2054 if (result->cur_val > max->cur)
2055 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2056 level, result->cur_val, max->cur);
2057
2058 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2059 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2060 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2061 result->enable = true;
2062 }
2063
a9786a11
VS
2064 return ret;
2065}
2066
d34ff9c6 2067static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
6f5ddd17 2068 int level,
820c1980 2069 const struct ilk_pipe_wm_parameters *p,
1fd527cc 2070 struct intel_wm_level *result)
6f5ddd17
VS
2071{
2072 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2073 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2074 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2075
2076 /* WM1+ latency values stored in 0.5us units */
2077 if (level > 0) {
2078 pri_latency *= 5;
2079 spr_latency *= 5;
2080 cur_latency *= 5;
2081 }
2082
2083 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2084 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2085 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2086 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2087 result->enable = true;
2088}
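/*
 * Example of the unit conversion above (assumed value): a stored WM2
 * latency of 12 is in 0.5us units, so it becomes 12 * 5 = 60 in the
 * 0.1us units that ilk_wm_method1/2() expect, i.e. 6.0us.
 */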
2089
801bcfff
PZ
2090static uint32_t
2091hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
1f8eeabf
ED
2092{
2093 struct drm_i915_private *dev_priv = dev->dev_private;
1011d8c4 2094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6e3c9717 2095 struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
85a02deb 2096 u32 linetime, ips_linetime;
1f8eeabf 2097
3ef00284 2098 if (!intel_crtc->active)
801bcfff 2099 return 0;
1011d8c4 2100
1f8eeabf
ED
 2101 /* The WMs are computed based on how long it takes to fill a single
 2102 * row at the given clock rate, multiplied by 8.
 2103 */
fec8cba3
JB
2104 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2105 mode->crtc_clock);
2106 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
05024da3 2107 dev_priv->cdclk_freq);
1f8eeabf 2108
801bcfff
PZ
2109 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2110 PIPE_WM_LINETIME_TIME(linetime);
1f8eeabf
ED
2111}
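/*
 * Worked example with assumed numbers (1920x1080@60: htotal 2200,
 * 148500 kHz dot clock, and a hypothetical 450000 kHz cdclk):
 *   linetime     = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *   ips_linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 450000) =  39
 * i.e. the ~14.8us line time expressed in 1/8 us steps, packed into the
 * register by the two macros above.
 */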
2112
2af30a5c 2113static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
12b134df
VS
2114{
2115 struct drm_i915_private *dev_priv = dev->dev_private;
2116
2af30a5c
PB
2117 if (IS_GEN9(dev)) {
2118 uint32_t val;
4f947386 2119 int ret, i;
367294be 2120 int level, max_level = ilk_wm_max_level(dev);
2af30a5c
PB
2121
2122 /* read the first set of memory latencies[0:3] */
2123 val = 0; /* data0 to be programmed to 0 for first set */
2124 mutex_lock(&dev_priv->rps.hw_lock);
2125 ret = sandybridge_pcode_read(dev_priv,
2126 GEN9_PCODE_READ_MEM_LATENCY,
2127 &val);
2128 mutex_unlock(&dev_priv->rps.hw_lock);
2129
2130 if (ret) {
2131 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2132 return;
2133 }
2134
2135 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2136 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2137 GEN9_MEM_LATENCY_LEVEL_MASK;
2138 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2139 GEN9_MEM_LATENCY_LEVEL_MASK;
2140 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2141 GEN9_MEM_LATENCY_LEVEL_MASK;
2142
2143 /* read the second set of memory latencies[4:7] */
2144 val = 1; /* data0 to be programmed to 1 for second set */
2145 mutex_lock(&dev_priv->rps.hw_lock);
2146 ret = sandybridge_pcode_read(dev_priv,
2147 GEN9_PCODE_READ_MEM_LATENCY,
2148 &val);
2149 mutex_unlock(&dev_priv->rps.hw_lock);
2150 if (ret) {
2151 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2152 return;
2153 }
2154
2155 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2156 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2157 GEN9_MEM_LATENCY_LEVEL_MASK;
2158 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2159 GEN9_MEM_LATENCY_LEVEL_MASK;
2160 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2161 GEN9_MEM_LATENCY_LEVEL_MASK;
2162
367294be 2163 /*
6f97235b
DL
2164 * WaWmMemoryReadLatency:skl
2165 *
367294be
VK
2166 * punit doesn't take into account the read latency so we need
2167 * to add 2us to the various latency levels we retrieve from
2168 * the punit.
2169 * - W0 is a bit special in that it's the only level that
2170 * can't be disabled if we want to have display working, so
2171 * we always add 2us there.
2172 * - For levels >=1, punit returns 0us latency when they are
2173 * disabled, so we respect that and don't add 2us then
4f947386
VK
2174 *
2175 * Additionally, if a level n (n > 1) has a 0us latency, all
2176 * levels m (m >= n) need to be disabled. We make sure to
2177 * sanitize the values out of the punit to satisfy this
2178 * requirement.
367294be
VK
2179 */
2180 wm[0] += 2;
2181 for (level = 1; level <= max_level; level++)
2182 if (wm[level] != 0)
2183 wm[level] += 2;
4f947386
VK
2184 else {
2185 for (i = level + 1; i <= max_level; i++)
2186 wm[i] = 0;
367294be 2187
4f947386
VK
2188 break;
2189 }
2af30a5c 2190 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12b134df
VS
2191 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2192
2193 wm[0] = (sskpd >> 56) & 0xFF;
2194 if (wm[0] == 0)
2195 wm[0] = sskpd & 0xF;
e5d5019e
VS
2196 wm[1] = (sskpd >> 4) & 0xFF;
2197 wm[2] = (sskpd >> 12) & 0xFF;
2198 wm[3] = (sskpd >> 20) & 0x1FF;
2199 wm[4] = (sskpd >> 32) & 0x1FF;
63cf9a13
VS
2200 } else if (INTEL_INFO(dev)->gen >= 6) {
2201 uint32_t sskpd = I915_READ(MCH_SSKPD);
2202
2203 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2204 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2205 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2206 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
3a88d0ac
VS
2207 } else if (INTEL_INFO(dev)->gen >= 5) {
2208 uint32_t mltr = I915_READ(MLTR_ILK);
2209
2210 /* ILK primary LP0 latency is 700 ns */
2211 wm[0] = 7;
2212 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2213 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
12b134df
VS
2214 }
2215}
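/*
 * Example of the gen9 sanitization above, with made-up punit values
 * wm[] = { 2, 4, 10, 0, 6, ... } (in us): WM0 always gets +2 -> 4,
 * WM1/WM2 become 6 and 12, and because WM3 reads back as 0 it and every
 * deeper level (including the non-zero WM4) are left at 0, i.e. disabled.
 */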
2216
53615a5e
VS
2217static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2218{
2219 /* ILK sprite LP0 latency is 1300 ns */
2220 if (INTEL_INFO(dev)->gen == 5)
2221 wm[0] = 13;
2222}
2223
2224static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2225{
2226 /* ILK cursor LP0 latency is 1300 ns */
2227 if (INTEL_INFO(dev)->gen == 5)
2228 wm[0] = 13;
2229
2230 /* WaDoubleCursorLP3Latency:ivb */
2231 if (IS_IVYBRIDGE(dev))
2232 wm[3] *= 2;
2233}
2234
546c81fd 2235int ilk_wm_max_level(const struct drm_device *dev)
26ec971e 2236{
26ec971e 2237 /* how many WM levels are we expecting */
b6e742f6 2238 if (INTEL_INFO(dev)->gen >= 9)
2af30a5c
PB
2239 return 7;
2240 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ad0d6dc4 2241 return 4;
26ec971e 2242 else if (INTEL_INFO(dev)->gen >= 6)
ad0d6dc4 2243 return 3;
26ec971e 2244 else
ad0d6dc4
VS
2245 return 2;
2246}
7526ed79 2247
ad0d6dc4
VS
2248static void intel_print_wm_latency(struct drm_device *dev,
2249 const char *name,
2af30a5c 2250 const uint16_t wm[8])
ad0d6dc4
VS
2251{
2252 int level, max_level = ilk_wm_max_level(dev);
26ec971e
VS
2253
2254 for (level = 0; level <= max_level; level++) {
2255 unsigned int latency = wm[level];
2256
2257 if (latency == 0) {
2258 DRM_ERROR("%s WM%d latency not provided\n",
2259 name, level);
2260 continue;
2261 }
2262
2af30a5c
PB
2263 /*
2264 * - latencies are in us on gen9.
2265 * - before then, WM1+ latency values are in 0.5us units
2266 */
2267 if (IS_GEN9(dev))
2268 latency *= 10;
2269 else if (level > 0)
26ec971e
VS
2270 latency *= 5;
2271
2272 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2273 name, level, wm[level],
2274 latency / 10, latency % 10);
2275 }
2276}
2277
e95a2f75
VS
2278static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2279 uint16_t wm[5], uint16_t min)
2280{
2281 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2282
2283 if (wm[0] >= min)
2284 return false;
2285
2286 wm[0] = max(wm[0], min);
2287 for (level = 1; level <= max_level; level++)
2288 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2289
2290 return true;
2291}
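/*
 * Example with assumed values: wm[] = { 5, 2, 10, 20, 40 } and min = 12
 * (WM0 in 0.1us units, WM1+ in 0.5us units). WM0 is below the minimum,
 * so it is raised to 12, and every WM1+ value is raised to at least
 * DIV_ROUND_UP(12, 5) = 3, giving { 12, 3, 10, 20, 40 }.
 */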
2292
2293static void snb_wm_latency_quirk(struct drm_device *dev)
2294{
2295 struct drm_i915_private *dev_priv = dev->dev_private;
2296 bool changed;
2297
2298 /*
2299 * The BIOS provided WM memory latency values are often
2300 * inadequate for high resolution displays. Adjust them.
2301 */
2302 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2303 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2304 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2305
2306 if (!changed)
2307 return;
2308
2309 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2310 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2311 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2312 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2313}
2314
fa50ad61 2315static void ilk_setup_wm_latency(struct drm_device *dev)
53615a5e
VS
2316{
2317 struct drm_i915_private *dev_priv = dev->dev_private;
2318
2319 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2320
2321 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2322 sizeof(dev_priv->wm.pri_latency));
2323 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2324 sizeof(dev_priv->wm.pri_latency));
2325
2326 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2327 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
26ec971e
VS
2328
2329 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2330 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2331 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
e95a2f75
VS
2332
2333 if (IS_GEN6(dev))
2334 snb_wm_latency_quirk(dev);
53615a5e
VS
2335}
2336
2af30a5c
PB
2337static void skl_setup_wm_latency(struct drm_device *dev)
2338{
2339 struct drm_i915_private *dev_priv = dev->dev_private;
2340
2341 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2342 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2343}
2344
820c1980 2345static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2a44b76b 2346 struct ilk_pipe_wm_parameters *p)
1011d8c4 2347{
7c4a395f
VS
2348 struct drm_device *dev = crtc->dev;
2349 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2350 enum pipe pipe = intel_crtc->pipe;
7c4a395f 2351 struct drm_plane *plane;
1011d8c4 2352
3ef00284 2353 if (!intel_crtc->active)
2a44b76b 2354 return;
801bcfff 2355
2a44b76b 2356 p->active = true;
6e3c9717 2357 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
8cfb3407 2358 p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
c9f038a1 2359
54da691d 2360 if (crtc->primary->state->fb)
c9f038a1
MR
2361 p->pri.bytes_per_pixel =
2362 crtc->primary->state->fb->bits_per_pixel / 8;
54da691d
TG
2363 else
2364 p->pri.bytes_per_pixel = 4;
2365
2366 p->cur.bytes_per_pixel = 4;
2367 /*
2368 * TODO: for now, assume primary and cursor planes are always enabled.
2369 * Setting them to false makes the screen flicker.
2370 */
2371 p->pri.enabled = true;
2372 p->cur.enabled = true;
c9f038a1 2373
6e3c9717 2374 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
3dd512fb 2375 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
7c4a395f 2376
4ea50e99 2377 drm_for_each_legacy_plane(plane, dev) {
801bcfff 2378 struct intel_plane *intel_plane = to_intel_plane(plane);
801bcfff 2379
2a44b76b 2380 if (intel_plane->pipe == pipe) {
7c4a395f 2381 p->spr = intel_plane->wm;
2a44b76b
VS
2382 break;
2383 }
2384 }
2385}
2386
2387static void ilk_compute_wm_config(struct drm_device *dev,
2388 struct intel_wm_config *config)
2389{
2390 struct intel_crtc *intel_crtc;
2391
2392 /* Compute the currently _active_ config */
d3fcc808 2393 for_each_intel_crtc(dev, intel_crtc) {
2a44b76b 2394 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
cca32e9a 2395
2a44b76b
VS
2396 if (!wm->pipe_enabled)
2397 continue;
cca32e9a 2398
2a44b76b
VS
2399 config->sprites_enabled |= wm->sprites_enabled;
2400 config->sprites_scaled |= wm->sprites_scaled;
2401 config->num_pipes_active++;
cca32e9a 2402 }
801bcfff
PZ
2403}
2404
0b2ae6d7
VS
2405/* Compute new watermarks for the pipe */
2406static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
820c1980 2407 const struct ilk_pipe_wm_parameters *params,
0b2ae6d7
VS
2408 struct intel_pipe_wm *pipe_wm)
2409{
2410 struct drm_device *dev = crtc->dev;
d34ff9c6 2411 const struct drm_i915_private *dev_priv = dev->dev_private;
0b2ae6d7
VS
2412 int level, max_level = ilk_wm_max_level(dev);
2413 /* LP0 watermark maximums depend on this pipe alone */
2414 struct intel_wm_config config = {
2415 .num_pipes_active = 1,
2416 .sprites_enabled = params->spr.enabled,
2417 .sprites_scaled = params->spr.scaled,
2418 };
820c1980 2419 struct ilk_wm_maximums max;
0b2ae6d7 2420
2a44b76b
VS
2421 pipe_wm->pipe_enabled = params->active;
2422 pipe_wm->sprites_enabled = params->spr.enabled;
2423 pipe_wm->sprites_scaled = params->spr.scaled;
2424
7b39a0b7
VS
2425 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2426 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2427 max_level = 1;
2428
2429 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2430 if (params->spr.scaled)
2431 max_level = 0;
2432
a3cb4048 2433 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
0b2ae6d7 2434
a42a5719 2435 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ce0e0713 2436 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
0b2ae6d7 2437
a3cb4048
VS
2438 /* LP0 watermarks always use 1/2 DDB partitioning */
2439 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2440
0b2ae6d7 2441 /* At least LP0 must be valid */
a3cb4048
VS
2442 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2443 return false;
2444
2445 ilk_compute_wm_reg_maximums(dev, 1, &max);
2446
2447 for (level = 1; level <= max_level; level++) {
2448 struct intel_wm_level wm = {};
2449
2450 ilk_compute_wm_level(dev_priv, level, params, &wm);
2451
2452 /*
2453 * Disable any watermark level that exceeds the
2454 * register maximums since such watermarks are
2455 * always invalid.
2456 */
2457 if (!ilk_validate_wm_level(level, &max, &wm))
2458 break;
2459
2460 pipe_wm->wm[level] = wm;
2461 }
2462
2463 return true;
0b2ae6d7
VS
2464}
2465
2466/*
2467 * Merge the watermarks from all active pipes for a specific level.
2468 */
2469static void ilk_merge_wm_level(struct drm_device *dev,
2470 int level,
2471 struct intel_wm_level *ret_wm)
2472{
2473 const struct intel_crtc *intel_crtc;
2474
d52fea5b
VS
2475 ret_wm->enable = true;
2476
d3fcc808 2477 for_each_intel_crtc(dev, intel_crtc) {
fe392efd
VS
2478 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2479 const struct intel_wm_level *wm = &active->wm[level];
2480
2481 if (!active->pipe_enabled)
2482 continue;
0b2ae6d7 2483
d52fea5b
VS
2484 /*
2485 * The watermark values may have been used in the past,
2486 * so we must maintain them in the registers for some
2487 * time even if the level is now disabled.
2488 */
0b2ae6d7 2489 if (!wm->enable)
d52fea5b 2490 ret_wm->enable = false;
0b2ae6d7
VS
2491
2492 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2493 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2494 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2495 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2496 }
0b2ae6d7
VS
2497}
2498
2499/*
2500 * Merge all low power watermarks for all active pipes.
2501 */
2502static void ilk_wm_merge(struct drm_device *dev,
0ba22e26 2503 const struct intel_wm_config *config,
820c1980 2504 const struct ilk_wm_maximums *max,
0b2ae6d7
VS
2505 struct intel_pipe_wm *merged)
2506{
7733b49b 2507 struct drm_i915_private *dev_priv = dev->dev_private;
0b2ae6d7 2508 int level, max_level = ilk_wm_max_level(dev);
d52fea5b 2509 int last_enabled_level = max_level;
0b2ae6d7 2510
0ba22e26
VS
2511 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2512 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2513 config->num_pipes_active > 1)
2514 return;
2515
6c8b6c28
VS
2516 /* ILK: FBC WM must be disabled always */
2517 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
0b2ae6d7
VS
2518
2519 /* merge each WM1+ level */
2520 for (level = 1; level <= max_level; level++) {
2521 struct intel_wm_level *wm = &merged->wm[level];
2522
2523 ilk_merge_wm_level(dev, level, wm);
2524
d52fea5b
VS
2525 if (level > last_enabled_level)
2526 wm->enable = false;
2527 else if (!ilk_validate_wm_level(level, max, wm))
2528 /* make sure all following levels get disabled */
2529 last_enabled_level = level - 1;
0b2ae6d7
VS
2530
2531 /*
2532 * The spec says it is preferred to disable
2533 * FBC WMs instead of disabling a WM level.
2534 */
2535 if (wm->fbc_val > max->fbc) {
d52fea5b
VS
2536 if (wm->enable)
2537 merged->fbc_wm_enabled = false;
0b2ae6d7
VS
2538 wm->fbc_val = 0;
2539 }
2540 }
6c8b6c28
VS
2541
2542 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2543 /*
2544 * FIXME this is racy. FBC might get enabled later.
2545 * What we should check here is whether FBC can be
2546 * enabled sometime later.
2547 */
7733b49b
PZ
2548 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2549 intel_fbc_enabled(dev_priv)) {
6c8b6c28
VS
2550 for (level = 2; level <= max_level; level++) {
2551 struct intel_wm_level *wm = &merged->wm[level];
2552
2553 wm->enable = false;
2554 }
2555 }
0b2ae6d7
VS
2556}
2557
b380ca3c
VS
2558static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2559{
2560 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2561 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2562}
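/*
 * Example: with five levels (0-4), LP1/LP2/LP3 map to levels 1/2/3 when
 * wm[4] is disabled and to 1/3/4 when it is enabled.
 */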
2563
a68d68ee
VS
2564/* The value we need to program into the WM_LPx latency field */
2565static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2566{
2567 struct drm_i915_private *dev_priv = dev->dev_private;
2568
a42a5719 2569 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
a68d68ee
VS
2570 return 2 * level;
2571 else
2572 return dev_priv->wm.pri_latency[level];
2573}
2574
820c1980 2575static void ilk_compute_wm_results(struct drm_device *dev,
0362c781 2576 const struct intel_pipe_wm *merged,
609cedef 2577 enum intel_ddb_partitioning partitioning,
820c1980 2578 struct ilk_wm_values *results)
801bcfff 2579{
0b2ae6d7
VS
2580 struct intel_crtc *intel_crtc;
2581 int level, wm_lp;
cca32e9a 2582
0362c781 2583 results->enable_fbc_wm = merged->fbc_wm_enabled;
609cedef 2584 results->partitioning = partitioning;
cca32e9a 2585
0b2ae6d7 2586 /* LP1+ register values */
cca32e9a 2587 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
1fd527cc 2588 const struct intel_wm_level *r;
801bcfff 2589
b380ca3c 2590 level = ilk_wm_lp_to_level(wm_lp, merged);
0b2ae6d7 2591
0362c781 2592 r = &merged->wm[level];
cca32e9a 2593
d52fea5b
VS
2594 /*
2595 * Maintain the watermark values even if the level is
2596 * disabled. Doing otherwise could cause underruns.
2597 */
2598 results->wm_lp[wm_lp - 1] =
a68d68ee 2599 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
416f4727
VS
2600 (r->pri_val << WM1_LP_SR_SHIFT) |
2601 r->cur_val;
2602
d52fea5b
VS
2603 if (r->enable)
2604 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2605
416f4727
VS
2606 if (INTEL_INFO(dev)->gen >= 8)
2607 results->wm_lp[wm_lp - 1] |=
2608 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2609 else
2610 results->wm_lp[wm_lp - 1] |=
2611 r->fbc_val << WM1_LP_FBC_SHIFT;
2612
d52fea5b
VS
2613 /*
2614 * Always set WM1S_LP_EN when spr_val != 0, even if the
2615 * level is disabled. Doing otherwise could cause underruns.
2616 */
6cef2b8a
VS
2617 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2618 WARN_ON(wm_lp != 1);
2619 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2620 } else
2621 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
cca32e9a 2622 }
801bcfff 2623
0b2ae6d7 2624 /* LP0 register values */
d3fcc808 2625 for_each_intel_crtc(dev, intel_crtc) {
0b2ae6d7
VS
2626 enum pipe pipe = intel_crtc->pipe;
2627 const struct intel_wm_level *r =
2628 &intel_crtc->wm.active.wm[0];
2629
2630 if (WARN_ON(!r->enable))
2631 continue;
2632
2633 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
1011d8c4 2634
0b2ae6d7
VS
2635 results->wm_pipe[pipe] =
2636 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2637 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2638 r->cur_val;
801bcfff
PZ
2639 }
2640}
2641
861f3389
PZ
2642/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2643 * case both are at the same level. Prefer r1 in case they're the same. */
820c1980 2644static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
198a1e9b
VS
2645 struct intel_pipe_wm *r1,
2646 struct intel_pipe_wm *r2)
861f3389 2647{
198a1e9b
VS
2648 int level, max_level = ilk_wm_max_level(dev);
2649 int level1 = 0, level2 = 0;
861f3389 2650
198a1e9b
VS
2651 for (level = 1; level <= max_level; level++) {
2652 if (r1->wm[level].enable)
2653 level1 = level;
2654 if (r2->wm[level].enable)
2655 level2 = level;
861f3389
PZ
2656 }
2657
198a1e9b
VS
2658 if (level1 == level2) {
2659 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
861f3389
PZ
2660 return r2;
2661 else
2662 return r1;
198a1e9b 2663 } else if (level1 > level2) {
861f3389
PZ
2664 return r1;
2665 } else {
2666 return r2;
2667 }
2668}
2669
49a687c4
VS
2670/* dirty bits used to track which watermarks need changes */
2671#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2672#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2673#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2674#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2675#define WM_DIRTY_FBC (1 << 24)
2676#define WM_DIRTY_DDB (1 << 25)
2677
055e393f 2678static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
820c1980
ID
2679 const struct ilk_wm_values *old,
2680 const struct ilk_wm_values *new)
49a687c4
VS
2681{
2682 unsigned int dirty = 0;
2683 enum pipe pipe;
2684 int wm_lp;
2685
055e393f 2686 for_each_pipe(dev_priv, pipe) {
49a687c4
VS
2687 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2688 dirty |= WM_DIRTY_LINETIME(pipe);
2689 /* Must disable LP1+ watermarks too */
2690 dirty |= WM_DIRTY_LP_ALL;
2691 }
2692
2693 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2694 dirty |= WM_DIRTY_PIPE(pipe);
2695 /* Must disable LP1+ watermarks too */
2696 dirty |= WM_DIRTY_LP_ALL;
2697 }
2698 }
2699
2700 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2701 dirty |= WM_DIRTY_FBC;
2702 /* Must disable LP1+ watermarks too */
2703 dirty |= WM_DIRTY_LP_ALL;
2704 }
2705
2706 if (old->partitioning != new->partitioning) {
2707 dirty |= WM_DIRTY_DDB;
2708 /* Must disable LP1+ watermarks too */
2709 dirty |= WM_DIRTY_LP_ALL;
2710 }
2711
2712 /* LP1+ watermarks already deemed dirty, no need to continue */
2713 if (dirty & WM_DIRTY_LP_ALL)
2714 return dirty;
2715
2716 /* Find the lowest numbered LP1+ watermark in need of an update... */
2717 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2718 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2719 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2720 break;
2721 }
2722
2723 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2724 for (; wm_lp <= 3; wm_lp++)
2725 dirty |= WM_DIRTY_LP(wm_lp);
2726
2727 return dirty;
2728}
2729
8553c18e
VS
2730static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2731 unsigned int dirty)
801bcfff 2732{
820c1980 2733 struct ilk_wm_values *previous = &dev_priv->wm.hw;
8553c18e 2734 bool changed = false;
801bcfff 2735
facd619b
VS
2736 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2737 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2738 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
8553c18e 2739 changed = true;
facd619b
VS
2740 }
2741 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2742 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2743 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
8553c18e 2744 changed = true;
facd619b
VS
2745 }
2746 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2747 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2748 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
8553c18e 2749 changed = true;
facd619b 2750 }
801bcfff 2751
facd619b
VS
2752 /*
2753 * Don't touch WM1S_LP_EN here.
2754 * Doing so could cause underruns.
2755 */
6cef2b8a 2756
8553c18e
VS
2757 return changed;
2758}
2759
2760/*
 2761 * The spec says we shouldn't write when we don't need to, because every write
2762 * causes WMs to be re-evaluated, expending some power.
2763 */
820c1980
ID
2764static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2765 struct ilk_wm_values *results)
8553c18e
VS
2766{
2767 struct drm_device *dev = dev_priv->dev;
820c1980 2768 struct ilk_wm_values *previous = &dev_priv->wm.hw;
8553c18e
VS
2769 unsigned int dirty;
2770 uint32_t val;
2771
055e393f 2772 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
8553c18e
VS
2773 if (!dirty)
2774 return;
2775
2776 _ilk_disable_lp_wm(dev_priv, dirty);
2777
49a687c4 2778 if (dirty & WM_DIRTY_PIPE(PIPE_A))
801bcfff 2779 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
49a687c4 2780 if (dirty & WM_DIRTY_PIPE(PIPE_B))
801bcfff 2781 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
49a687c4 2782 if (dirty & WM_DIRTY_PIPE(PIPE_C))
801bcfff
PZ
2783 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2784
49a687c4 2785 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
801bcfff 2786 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
49a687c4 2787 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
801bcfff 2788 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
49a687c4 2789 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
801bcfff
PZ
2790 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2791
49a687c4 2792 if (dirty & WM_DIRTY_DDB) {
a42a5719 2793 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ac9545fd
VS
2794 val = I915_READ(WM_MISC);
2795 if (results->partitioning == INTEL_DDB_PART_1_2)
2796 val &= ~WM_MISC_DATA_PARTITION_5_6;
2797 else
2798 val |= WM_MISC_DATA_PARTITION_5_6;
2799 I915_WRITE(WM_MISC, val);
2800 } else {
2801 val = I915_READ(DISP_ARB_CTL2);
2802 if (results->partitioning == INTEL_DDB_PART_1_2)
2803 val &= ~DISP_DATA_PARTITION_5_6;
2804 else
2805 val |= DISP_DATA_PARTITION_5_6;
2806 I915_WRITE(DISP_ARB_CTL2, val);
2807 }
1011d8c4
PZ
2808 }
2809
49a687c4 2810 if (dirty & WM_DIRTY_FBC) {
cca32e9a
PZ
2811 val = I915_READ(DISP_ARB_CTL);
2812 if (results->enable_fbc_wm)
2813 val &= ~DISP_FBC_WM_DIS;
2814 else
2815 val |= DISP_FBC_WM_DIS;
2816 I915_WRITE(DISP_ARB_CTL, val);
2817 }
2818
954911eb
ID
2819 if (dirty & WM_DIRTY_LP(1) &&
2820 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2821 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2822
2823 if (INTEL_INFO(dev)->gen >= 7) {
6cef2b8a
VS
2824 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2825 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2826 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2827 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2828 }
801bcfff 2829
facd619b 2830 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
801bcfff 2831 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
facd619b 2832 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
801bcfff 2833 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
facd619b 2834 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
801bcfff 2835 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
609cedef
VS
2836
2837 dev_priv->wm.hw = *results;
801bcfff
PZ
2838}
2839
8553c18e
VS
2840static bool ilk_disable_lp_wm(struct drm_device *dev)
2841{
2842 struct drm_i915_private *dev_priv = dev->dev_private;
2843
2844 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2845}
2846
b9cec075
DL
2847/*
2848 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2849 * different active planes.
2850 */
2851
2852#define SKL_DDB_SIZE 896 /* in blocks */
43d735a6 2853#define BXT_DDB_SIZE 512
b9cec075
DL
2854
2855static void
2856skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2857 struct drm_crtc *for_crtc,
2858 const struct intel_wm_config *config,
2859 const struct skl_pipe_wm_parameters *params,
2860 struct skl_ddb_entry *alloc /* out */)
2861{
2862 struct drm_crtc *crtc;
2863 unsigned int pipe_size, ddb_size;
2864 int nth_active_pipe;
2865
2866 if (!params->active) {
2867 alloc->start = 0;
2868 alloc->end = 0;
2869 return;
2870 }
2871
43d735a6
DL
2872 if (IS_BROXTON(dev))
2873 ddb_size = BXT_DDB_SIZE;
2874 else
2875 ddb_size = SKL_DDB_SIZE;
b9cec075
DL
2876
2877 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2878
2879 nth_active_pipe = 0;
2880 for_each_crtc(dev, crtc) {
3ef00284 2881 if (!to_intel_crtc(crtc)->active)
b9cec075
DL
2882 continue;
2883
2884 if (crtc == for_crtc)
2885 break;
2886
2887 nth_active_pipe++;
2888 }
2889
2890 pipe_size = ddb_size / config->num_pipes_active;
2891 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
16160e3d 2892 alloc->end = alloc->start + pipe_size;
b9cec075
DL
2893}
2894
2895static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
2896{
2897 if (config->num_pipes_active == 1)
2898 return 32;
2899
2900 return 8;
2901}
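/*
 * Worked example of the DDB carve-up above, assuming two active pipes
 * on SKL: 896 - 4 bypass blocks = 892, so each pipe gets 892 / 2 = 446
 * blocks, the first active pipe [0, 446) and the second [446, 892).
 * With more than one pipe active each cursor gets 8 of those blocks;
 * with a single pipe it gets 32.
 */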
2902
a269c583
DL
2903static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2904{
2905 entry->start = reg & 0x3ff;
2906 entry->end = (reg >> 16) & 0x3ff;
16160e3d
DL
2907 if (entry->end)
2908 entry->end += 1;
a269c583
DL
2909}
2910
08db6652
DL
2911void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2912 struct skl_ddb_allocation *ddb /* out */)
a269c583 2913{
a269c583
DL
2914 enum pipe pipe;
2915 int plane;
2916 u32 val;
2917
2918 for_each_pipe(dev_priv, pipe) {
dd740780 2919 for_each_plane(dev_priv, pipe, plane) {
a269c583
DL
2920 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2921 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2922 val);
2923 }
2924
2925 val = I915_READ(CUR_BUF_CFG(pipe));
2926 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
2927 }
2928}
2929
b9cec075 2930static unsigned int
2cd601c6 2931skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
b9cec075 2932{
2cd601c6
CK
2933
2934 /* for planar format */
2935 if (p->y_bytes_per_pixel) {
2936 if (y) /* y-plane data rate */
2937 return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
2938 else /* uv-plane data rate */
2939 return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
2940 }
2941
2942 /* for packed formats */
b9cec075
DL
2943 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
2944}
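/*
 * Example rates with an assumed 1920x1080 plane: packed XRGB8888 gives
 * 1920 * 1080 * 4 = 8294400, while NV12 is split into a y-plane rate of
 * 1920 * 1080 * 1 = 2073600 and a uv-plane rate of 960 * 540 * 2 =
 * 1036800 (the uv plane is subsampled in both directions).
 */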
2945
2946/*
2947 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 2948 * an 8192x4096@32bpp framebuffer:
2949 * 3 * 4096 * 8192 * 4 < 2^32
2950 */
2951static unsigned int
2952skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
2953 const struct skl_pipe_wm_parameters *params)
2954{
2955 unsigned int total_data_rate = 0;
2956 int plane;
2957
2958 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2959 const struct intel_plane_wm_parameters *p;
2960
2961 p = &params->plane[plane];
2962 if (!p->enabled)
2963 continue;
2964
2cd601c6
CK
2965 total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
2966 if (p->y_bytes_per_pixel) {
2967 total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
2968 }
b9cec075
DL
2969 }
2970
2971 return total_data_rate;
2972}
2973
2974static void
2975skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2976 const struct intel_wm_config *config,
2977 const struct skl_pipe_wm_parameters *params,
2978 struct skl_ddb_allocation *ddb /* out */)
2979{
2980 struct drm_device *dev = crtc->dev;
dd740780 2981 struct drm_i915_private *dev_priv = dev->dev_private;
b9cec075
DL
2982 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2983 enum pipe pipe = intel_crtc->pipe;
34bb56af 2984 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
b9cec075 2985 uint16_t alloc_size, start, cursor_blocks;
80958155 2986 uint16_t minimum[I915_MAX_PLANES];
2cd601c6 2987 uint16_t y_minimum[I915_MAX_PLANES];
b9cec075
DL
2988 unsigned int total_data_rate;
2989 int plane;
2990
34bb56af
DL
2991 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
2992 alloc_size = skl_ddb_entry_size(alloc);
b9cec075
DL
2993 if (alloc_size == 0) {
2994 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2995 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
2996 return;
2997 }
2998
2999 cursor_blocks = skl_cursor_allocation(config);
34bb56af
DL
3000 ddb->cursor[pipe].start = alloc->end - cursor_blocks;
3001 ddb->cursor[pipe].end = alloc->end;
b9cec075
DL
3002
3003 alloc_size -= cursor_blocks;
34bb56af 3004 alloc->end -= cursor_blocks;
b9cec075 3005
80958155 3006 /* 1. Allocate the minimum required blocks for each active plane */
dd740780 3007 for_each_plane(dev_priv, pipe, plane) {
80958155
DL
3008 const struct intel_plane_wm_parameters *p;
3009
3010 p = &params->plane[plane];
3011 if (!p->enabled)
3012 continue;
3013
3014 minimum[plane] = 8;
3015 alloc_size -= minimum[plane];
2cd601c6
CK
3016 y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
3017 alloc_size -= y_minimum[plane];
80958155
DL
3018 }
3019
b9cec075 3020 /*
80958155
DL
3021 * 2. Distribute the remaining space in proportion to the amount of
3022 * data each plane needs to fetch from memory.
b9cec075
DL
3023 *
3024 * FIXME: we may not allocate every single block here.
3025 */
3026 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
3027
34bb56af 3028 start = alloc->start;
b9cec075
DL
3029 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
3030 const struct intel_plane_wm_parameters *p;
2cd601c6
CK
3031 unsigned int data_rate, y_data_rate;
3032 uint16_t plane_blocks, y_plane_blocks = 0;
b9cec075
DL
3033
3034 p = &params->plane[plane];
3035 if (!p->enabled)
3036 continue;
3037
2cd601c6 3038 data_rate = skl_plane_relative_data_rate(p, 0);
b9cec075
DL
3039
3040 /*
2cd601c6 3041 * allocation for (packed formats) or (uv-plane part of planar format):
b9cec075
DL
3042 * promote the expression to 64 bits to avoid overflowing, the
3043 * result is < available as data_rate / total_data_rate < 1
3044 */
80958155
DL
3045 plane_blocks = minimum[plane];
3046 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3047 total_data_rate);
b9cec075
DL
3048
3049 ddb->plane[pipe][plane].start = start;
16160e3d 3050 ddb->plane[pipe][plane].end = start + plane_blocks;
b9cec075
DL
3051
3052 start += plane_blocks;
2cd601c6
CK
3053
3054 /*
3055 * allocation for y_plane part of planar format:
3056 */
3057 if (p->y_bytes_per_pixel) {
3058 y_data_rate = skl_plane_relative_data_rate(p, 1);
3059 y_plane_blocks = y_minimum[plane];
3060 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3061 total_data_rate);
3062
3063 ddb->y_plane[pipe][plane].start = start;
3064 ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
3065
3066 start += y_plane_blocks;
3067 }
3068
b9cec075
DL
3069 }
3070
3071}
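/*
 * Worked allocation with assumed numbers: a 446 block pipe allocation
 * (two active pipes), the cursor taking 8 blocks, and two packed planes
 * enabled, a 1920x1080x4 primary and a 1920x540x4 sprite. After the
 * 8 block minimums, 446 - 8 - 16 = 422 blocks remain; the primary gets
 * 8 + 422 * 8294400 / 12441600 = 289 blocks and the sprite
 * 8 + 422 * 4147200 / 12441600 = 148, leaving one block unallocated
 * (the FIXME above).
 */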
3072
5cec258b 3073static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
2d41c0b5
PB
3074{
3075 /* TODO: Take into account the scalers once we support them */
2d112de7 3076 return config->base.adjusted_mode.crtc_clock;
2d41c0b5
PB
3077}
3078
3079/*
3080 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3081 * for the read latency) and bytes_per_pixel should always be <= 8, so that
3082 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3083 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3084*/
3085static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
3086 uint32_t latency)
3087{
3088 uint32_t wm_intermediate_val, ret;
3089
3090 if (latency == 0)
3091 return UINT_MAX;
3092
d4c2aa60 3093 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
2d41c0b5
PB
3094 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3095
3096 return ret;
3097}
3098
3099static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3100 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
0fda6568 3101 uint64_t tiling, uint32_t latency)
2d41c0b5 3102{
d4c2aa60
TU
3103 uint32_t ret;
3104 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3105 uint32_t wm_intermediate_val;
2d41c0b5
PB
3106
3107 if (latency == 0)
3108 return UINT_MAX;
3109
3110 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
0fda6568
TU
3111
3112 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3113 tiling == I915_FORMAT_MOD_Yf_TILED) {
3114 plane_bytes_per_line *= 4;
3115 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3116 plane_blocks_per_line /= 4;
3117 } else {
3118 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3119 }
3120
2d41c0b5
PB
3121 wm_intermediate_val = latency * pixel_rate;
3122 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
d4c2aa60 3123 plane_blocks_per_line;
2d41c0b5
PB
3124
3125 return ret;
3126}
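/*
 * Worked example with assumed numbers (1920x1080@60: 148500 kHz pixel
 * rate, htotal 2200, 32bpp, linear tiling, 15us latency):
 *   method1: DIV_ROUND_UP(15 * 148500 * 4 / 512, 1000)       = 18
 *   method2: plane_blocks_per_line = DIV_ROUND_UP(7680, 512) = 15,
 *            DIV_ROUND_UP(15 * 148500, 2200 * 1000) * 15     = 30
 * skl_compute_plane_wm() below then picks between the two depending on
 * tiling and the DDB allocation.
 */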
3127
2d41c0b5
PB
3128static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3129 const struct intel_crtc *intel_crtc)
3130{
3131 struct drm_device *dev = intel_crtc->base.dev;
3132 struct drm_i915_private *dev_priv = dev->dev_private;
3133 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3134 enum pipe pipe = intel_crtc->pipe;
3135
3136 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
3137 sizeof(new_ddb->plane[pipe])))
3138 return true;
3139
3140 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
3141 sizeof(new_ddb->cursor[pipe])))
3142 return true;
3143
3144 return false;
3145}
3146
3147static void skl_compute_wm_global_parameters(struct drm_device *dev,
3148 struct intel_wm_config *config)
3149{
3150 struct drm_crtc *crtc;
3151 struct drm_plane *plane;
3152
3153 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3ef00284 3154 config->num_pipes_active += to_intel_crtc(crtc)->active;
2d41c0b5
PB
3155
3156 /* FIXME: I don't think we need those two global parameters on SKL */
3157 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3158 struct intel_plane *intel_plane = to_intel_plane(plane);
3159
3160 config->sprites_enabled |= intel_plane->wm.enabled;
3161 config->sprites_scaled |= intel_plane->wm.scaled;
3162 }
3163}
3164
3165static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
3166 struct skl_pipe_wm_parameters *p)
3167{
3168 struct drm_device *dev = crtc->dev;
3169 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3170 enum pipe pipe = intel_crtc->pipe;
3171 struct drm_plane *plane;
0fda6568 3172 struct drm_framebuffer *fb;
2d41c0b5
PB
3173 int i = 1; /* Index for sprite planes start */
3174
3ef00284 3175 p->active = intel_crtc->active;
2d41c0b5 3176 if (p->active) {
6e3c9717
ACO
3177 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
3178 p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
2d41c0b5 3179
0fda6568 3180 fb = crtc->primary->state->fb;
2cd601c6 3181 /* For planar: Bpp is for uv plane, y_Bpp is for y plane */
c9f038a1
MR
3182 if (fb) {
3183 p->plane[0].enabled = true;
2cd601c6 3184 p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
395ab754
KM
3185 drm_format_plane_cpp(fb->pixel_format, 1) :
3186 drm_format_plane_cpp(fb->pixel_format, 0);
2cd601c6
CK
3187 p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
3188 drm_format_plane_cpp(fb->pixel_format, 0) : 0;
0fda6568 3189 p->plane[0].tiling = fb->modifier[0];
c9f038a1
MR
3190 } else {
3191 p->plane[0].enabled = false;
3192 p->plane[0].bytes_per_pixel = 0;
2cd601c6 3193 p->plane[0].y_bytes_per_pixel = 0;
c9f038a1
MR
3194 p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
3195 }
3196 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
3197 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
1fc0a8f7 3198 p->plane[0].rotation = crtc->primary->state->rotation;
2d41c0b5 3199
c9f038a1 3200 fb = crtc->cursor->state->fb;
2cd601c6 3201 p->cursor.y_bytes_per_pixel = 0;
c9f038a1
MR
3202 if (fb) {
3203 p->cursor.enabled = true;
3204 p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
3205 p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
3206 p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
3207 } else {
3208 p->cursor.enabled = false;
3209 p->cursor.bytes_per_pixel = 0;
3210 p->cursor.horiz_pixels = 64;
3211 p->cursor.vert_pixels = 64;
3212 }
2d41c0b5
PB
3213 }
3214
3215 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3216 struct intel_plane *intel_plane = to_intel_plane(plane);
3217
a712f8eb
SJ
3218 if (intel_plane->pipe == pipe &&
3219 plane->type == DRM_PLANE_TYPE_OVERLAY)
2d41c0b5
PB
3220 p->plane[i++] = intel_plane->wm;
3221 }
3222}
3223
d4c2aa60
TU
3224static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3225 struct skl_pipe_wm_parameters *p,
afb024aa
DL
3226 struct intel_plane_wm_parameters *p_params,
3227 uint16_t ddb_allocation,
d4c2aa60 3228 int level,
afb024aa
DL
3229 uint16_t *out_blocks, /* out */
3230 uint8_t *out_lines /* out */)
2d41c0b5 3231{
d4c2aa60
TU
3232 uint32_t latency = dev_priv->wm.skl_latency[level];
3233 uint32_t method1, method2;
3234 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3235 uint32_t res_blocks, res_lines;
3236 uint32_t selected_result;
2cd601c6 3237 uint8_t bytes_per_pixel;
2d41c0b5 3238
d4c2aa60 3239 if (latency == 0 || !p->active || !p_params->enabled)
2d41c0b5
PB
3240 return false;
3241
2cd601c6
CK
3242 bytes_per_pixel = p_params->y_bytes_per_pixel ?
3243 p_params->y_bytes_per_pixel :
3244 p_params->bytes_per_pixel;
2d41c0b5 3245 method1 = skl_wm_method1(p->pixel_rate,
2cd601c6 3246 bytes_per_pixel,
d4c2aa60 3247 latency);
2d41c0b5
PB
3248 method2 = skl_wm_method2(p->pixel_rate,
3249 p->pipe_htotal,
3250 p_params->horiz_pixels,
2cd601c6 3251 bytes_per_pixel,
0fda6568 3252 p_params->tiling,
d4c2aa60 3253 latency);
2d41c0b5 3254
2cd601c6 3255 plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
d4c2aa60 3256 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2d41c0b5 3257
0fda6568
TU
3258 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
3259 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
1fc0a8f7
TU
3260 uint32_t min_scanlines = 4;
3261 uint32_t y_tile_minimum;
3262 if (intel_rotation_90_or_270(p_params->rotation)) {
3263 switch (p_params->bytes_per_pixel) {
3264 case 1:
3265 min_scanlines = 16;
3266 break;
3267 case 2:
3268 min_scanlines = 8;
3269 break;
3270 case 8:
3271 WARN(1, "Unsupported pixel depth for rotation");
2f0b5790 3272 }
1fc0a8f7
TU
3273 }
3274 y_tile_minimum = plane_blocks_per_line * min_scanlines;
0fda6568
TU
3275 selected_result = max(method2, y_tile_minimum);
3276 } else {
3277 if ((ddb_allocation / plane_blocks_per_line) >= 1)
3278 selected_result = min(method1, method2);
3279 else
3280 selected_result = method1;
3281 }
2d41c0b5 3282
d4c2aa60
TU
3283 res_blocks = selected_result + 1;
3284 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
e6d66171 3285
0fda6568
TU
3286 if (level >= 1 && level <= 7) {
3287 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
3288 p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
3289 res_lines += 4;
3290 else
3291 res_blocks++;
3292 }
e6d66171 3293
d4c2aa60 3294 if (res_blocks >= ddb_allocation || res_lines > 31)
e6d66171
DL
3295 return false;
3296
3297 *out_blocks = res_blocks;
3298 *out_lines = res_lines;
2d41c0b5
PB
3299
3300 return true;
3301}
3302
3303static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3304 struct skl_ddb_allocation *ddb,
3305 struct skl_pipe_wm_parameters *p,
3306 enum pipe pipe,
3307 int level,
3308 int num_planes,
3309 struct skl_wm_level *result)
3310{
2d41c0b5
PB
3311 uint16_t ddb_blocks;
3312 int i;
3313
3314 for (i = 0; i < num_planes; i++) {
3315 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3316
d4c2aa60
TU
3317 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
3318 p, &p->plane[i],
2d41c0b5 3319 ddb_blocks,
d4c2aa60 3320 level,
2d41c0b5
PB
3321 &result->plane_res_b[i],
3322 &result->plane_res_l[i]);
3323 }
3324
3325 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
d4c2aa60
TU
3326 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
3327 ddb_blocks, level,
3328 &result->cursor_res_b,
2d41c0b5
PB
3329 &result->cursor_res_l);
3330}
3331
407b50f3
DL
3332static uint32_t
3333skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
3334{
3ef00284 3335 if (!to_intel_crtc(crtc)->active)
407b50f3
DL
3336 return 0;
3337
661abfc0
MK
3338 if (WARN_ON(p->pixel_rate == 0))
3339 return 0;
407b50f3 3340
661abfc0 3341 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
407b50f3
DL
3342}
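/*
 * Illustrative arithmetic (assumed numbers): pipe_htotal = 2200 and
 * pixel_rate = 148500 give DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119.
 */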
3343
3344static void skl_compute_transition_wm(struct drm_crtc *crtc,
3345 struct skl_pipe_wm_parameters *params,
9414f563 3346 struct skl_wm_level *trans_wm /* out */)
407b50f3 3347{
9414f563
DL
3348 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3349 int i;
3350
407b50f3
DL
3351 if (!params->active)
3352 return;
9414f563
DL
3353
3354 /* Until we know more, just disable transition WMs */
3355 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3356 trans_wm->plane_en[i] = false;
3357 trans_wm->cursor_en = false;
407b50f3
DL
3358}
3359
2d41c0b5
PB
3360static void skl_compute_pipe_wm(struct drm_crtc *crtc,
3361 struct skl_ddb_allocation *ddb,
3362 struct skl_pipe_wm_parameters *params,
3363 struct skl_pipe_wm *pipe_wm)
3364{
3365 struct drm_device *dev = crtc->dev;
3366 const struct drm_i915_private *dev_priv = dev->dev_private;
3367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3368 int level, max_level = ilk_wm_max_level(dev);
3369
3370 for (level = 0; level <= max_level; level++) {
3371 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
3372 level, intel_num_planes(intel_crtc),
3373 &pipe_wm->wm[level]);
3374 }
3375 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
3376
9414f563 3377 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
2d41c0b5
PB
3378}
3379
3380static void skl_compute_wm_results(struct drm_device *dev,
3381 struct skl_pipe_wm_parameters *p,
3382 struct skl_pipe_wm *p_wm,
3383 struct skl_wm_values *r,
3384 struct intel_crtc *intel_crtc)
3385{
3386 int level, max_level = ilk_wm_max_level(dev);
3387 enum pipe pipe = intel_crtc->pipe;
9414f563
DL
3388 uint32_t temp;
3389 int i;
2d41c0b5
PB
3390
3391 for (level = 0; level <= max_level; level++) {
2d41c0b5
PB
3392 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3393 temp = 0;
2d41c0b5
PB
3394
3395 temp |= p_wm->wm[level].plane_res_l[i] <<
3396 PLANE_WM_LINES_SHIFT;
3397 temp |= p_wm->wm[level].plane_res_b[i];
3398 if (p_wm->wm[level].plane_en[i])
3399 temp |= PLANE_WM_EN;
3400
3401 r->plane[pipe][i][level] = temp;
2d41c0b5
PB
3402 }
3403
3404 temp = 0;
2d41c0b5
PB
3405
3406 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
3407 temp |= p_wm->wm[level].cursor_res_b;
3408
3409 if (p_wm->wm[level].cursor_en)
3410 temp |= PLANE_WM_EN;
3411
3412 r->cursor[pipe][level] = temp;
2d41c0b5
PB
3413
3414 }
3415
9414f563
DL
3416 /* transition WMs */
3417 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3418 temp = 0;
3419 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3420 temp |= p_wm->trans_wm.plane_res_b[i];
3421 if (p_wm->trans_wm.plane_en[i])
3422 temp |= PLANE_WM_EN;
3423
3424 r->plane_trans[pipe][i] = temp;
3425 }
3426
3427 temp = 0;
3428 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
3429 temp |= p_wm->trans_wm.cursor_res_b;
3430 if (p_wm->trans_wm.cursor_en)
3431 temp |= PLANE_WM_EN;
3432
3433 r->cursor_trans[pipe] = temp;
3434
2d41c0b5
PB
3435 r->wm_linetime[pipe] = p_wm->linetime;
3436}
3437
16160e3d
DL
3438static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
3439 const struct skl_ddb_entry *entry)
3440{
3441 if (entry->end)
3442 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3443 else
3444 I915_WRITE(reg, 0);
3445}
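/*
 * Illustrative example (assumed values): an entry with start = 0 and
 * end = 160 is written as ((160 - 1) << 16) | 0 = 0x009f0000; an entry
 * with end = 0 simply clears the register.
 */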
3446
2d41c0b5
PB
3447static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3448 const struct skl_wm_values *new)
3449{
3450 struct drm_device *dev = dev_priv->dev;
3451 struct intel_crtc *crtc;
3452
3453 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3454 int i, level, max_level = ilk_wm_max_level(dev);
3455 enum pipe pipe = crtc->pipe;
3456
5d374d96
DL
3457 if (!new->dirty[pipe])
3458 continue;
8211bd5b 3459
5d374d96 3460 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
8211bd5b 3461
5d374d96
DL
3462 for (level = 0; level <= max_level; level++) {
3463 for (i = 0; i < intel_num_planes(crtc); i++)
3464 I915_WRITE(PLANE_WM(pipe, i, level),
3465 new->plane[pipe][i][level]);
3466 I915_WRITE(CUR_WM(pipe, level),
3467 new->cursor[pipe][level]);
2d41c0b5 3468 }
5d374d96
DL
3469 for (i = 0; i < intel_num_planes(crtc); i++)
3470 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3471 new->plane_trans[pipe][i]);
3472 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
3473
2cd601c6 3474 for (i = 0; i < intel_num_planes(crtc); i++) {
5d374d96
DL
3475 skl_ddb_entry_write(dev_priv,
3476 PLANE_BUF_CFG(pipe, i),
3477 &new->ddb.plane[pipe][i]);
2cd601c6
CK
3478 skl_ddb_entry_write(dev_priv,
3479 PLANE_NV12_BUF_CFG(pipe, i),
3480 &new->ddb.y_plane[pipe][i]);
3481 }
5d374d96
DL
3482
3483 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3484 &new->ddb.cursor[pipe]);
2d41c0b5 3485 }
2d41c0b5
PB
3486}
3487
0e8fb7ba
DL
3488/*
3489 * When setting up a new DDB allocation arrangement, we need to correctly
3490 * sequence the times at which the new allocations for the pipes are taken into
3491 * account or we'll have pipes fetching from space previously allocated to
3492 * another pipe.
3493 *
3494 * Roughly the sequence looks like:
3495 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3496 * overlapping with a previous light-up pipe (another way to put it is:
 3497 * pipes with their new allocation strictly included in their old ones).
3498 * 2. re-allocate the other pipes that get their allocation reduced
3499 * 3. allocate the pipes having their allocation increased
3500 *
3501 * Steps 1. and 2. are here to take care of the following case:
3502 * - Initially DDB looks like this:
3503 * | B | C |
3504 * - enable pipe A.
3505 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3506 * allocation
3507 * | A | B | C |
3508 *
3509 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
3510 */
3511
d21b795c
DL
3512static void
3513skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
0e8fb7ba 3514{
0e8fb7ba
DL
3515 int plane;
3516
d21b795c
DL
3517 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3518
dd740780 3519 for_each_plane(dev_priv, pipe, plane) {
0e8fb7ba
DL
3520 I915_WRITE(PLANE_SURF(pipe, plane),
3521 I915_READ(PLANE_SURF(pipe, plane)));
3522 }
3523 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3524}
3525
3526static bool
3527skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3528 const struct skl_ddb_allocation *new,
3529 enum pipe pipe)
3530{
3531 uint16_t old_size, new_size;
3532
3533 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3534 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3535
3536 return old_size != new_size &&
3537 new->pipe[pipe].start >= old->pipe[pipe].start &&
3538 new->pipe[pipe].end <= old->pipe[pipe].end;
3539}
3540
3541static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3542 struct skl_wm_values *new_values)
3543{
3544 struct drm_device *dev = dev_priv->dev;
3545 struct skl_ddb_allocation *cur_ddb, *new_ddb;
c929cb45 3546 bool reallocated[I915_MAX_PIPES] = {};
0e8fb7ba
DL
3547 struct intel_crtc *crtc;
3548 enum pipe pipe;
3549
3550 new_ddb = &new_values->ddb;
3551 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3552
3553 /*
3554 * First pass: flush the pipes with the new allocation contained into
3555 * the old space.
3556 *
3557 * We'll wait for the vblank on those pipes to ensure we can safely
3558 * re-allocate the freed space without this pipe fetching from it.
3559 */
3560 for_each_intel_crtc(dev, crtc) {
3561 if (!crtc->active)
3562 continue;
3563
3564 pipe = crtc->pipe;
3565
3566 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3567 continue;
3568
d21b795c 3569 skl_wm_flush_pipe(dev_priv, pipe, 1);
0e8fb7ba
DL
3570 intel_wait_for_vblank(dev, pipe);
3571
3572 reallocated[pipe] = true;
3573 }
3574
3575
3576 /*
3577 * Second pass: flush the pipes that are having their allocation
3578 * reduced, but overlapping with a previous allocation.
3579 *
3580 * Here as well we need to wait for the vblank to make sure the freed
3581 * space is not used anymore.
3582 */
3583 for_each_intel_crtc(dev, crtc) {
3584 if (!crtc->active)
3585 continue;
3586
3587 pipe = crtc->pipe;
3588
3589 if (reallocated[pipe])
3590 continue;
3591
3592 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3593 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
d21b795c 3594 skl_wm_flush_pipe(dev_priv, pipe, 2);
0e8fb7ba 3595 intel_wait_for_vblank(dev, pipe);
d9d8e6b3 3596 reallocated[pipe] = true;
0e8fb7ba 3597 }
0e8fb7ba
DL
3598 }
3599
3600 /*
3601 * Third pass: flush the pipes that got more space allocated.
3602 *
3603 * We don't need to actively wait for the update here, next vblank
3604 * will just get more DDB space with the correct WM values.
3605 */
3606 for_each_intel_crtc(dev, crtc) {
3607 if (!crtc->active)
3608 continue;
3609
3610 pipe = crtc->pipe;
3611
3612 /*
 3613 * At this point, only the pipes with more space allocated than before
 3614 * are left to re-allocate.
3615 */
3616 if (reallocated[pipe])
3617 continue;
3618
d21b795c 3619 skl_wm_flush_pipe(dev_priv, pipe, 3);
0e8fb7ba
DL
3620 }
3621}
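/*
 * Tying the three passes back to the example above (illustrative): with
 * | B | C | becoming | A | B | C |, pipe C shrinks within its old space
 * (pass 1), pipe B shrinks but overlaps old pipe C space (pass 2), and
 * pipe A, which gained space, is flushed last (pass 3).
 */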
3622
2d41c0b5
PB
3623static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3624 struct skl_pipe_wm_parameters *params,
3625 struct intel_wm_config *config,
3626 struct skl_ddb_allocation *ddb, /* out */
3627 struct skl_pipe_wm *pipe_wm /* out */)
3628{
3629 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3630
3631 skl_compute_wm_pipe_parameters(crtc, params);
b9cec075 3632 skl_allocate_pipe_ddb(crtc, config, params, ddb);
2d41c0b5
PB
3633 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
3634
3635 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
3636 return false;
3637
3638 intel_crtc->wm.skl_active = *pipe_wm;
2cd601c6 3639
2d41c0b5
PB
3640 return true;
3641}
3642
3643static void skl_update_other_pipe_wm(struct drm_device *dev,
3644 struct drm_crtc *crtc,
3645 struct intel_wm_config *config,
3646 struct skl_wm_values *r)
3647{
3648 struct intel_crtc *intel_crtc;
3649 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3650
3651 /*
3652 * If the WM update hasn't changed the allocation for this_crtc (the
3653 * crtc we are currently computing the new WM values for), other
3654 * enabled crtcs will keep the same allocation and we don't need to
3655 * recompute anything for them.
3656 */
3657 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3658 return;
3659
3660 /*
3661 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3662 * other active pipes need new DDB allocation and WM values.
3663 */
3664 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3665 base.head) {
3666 struct skl_pipe_wm_parameters params = {};
3667 struct skl_pipe_wm pipe_wm = {};
3668 bool wm_changed;
3669
3670 if (this_crtc->pipe == intel_crtc->pipe)
3671 continue;
3672
3673 if (!intel_crtc->active)
3674 continue;
3675
3676 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
3677 &params, config,
3678 &r->ddb, &pipe_wm);
3679
3680 /*
3681 * If we end up re-computing the other pipe WM values, it's
3682 * because it was really needed, so we expect the WM values to
3683 * be different.
3684 */
3685 WARN_ON(!wm_changed);
3686
3687 skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
3688 r->dirty[intel_crtc->pipe] = true;
3689 }
3690}
3691
adda50b8
BP
3692static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
3693{
3694 watermarks->wm_linetime[pipe] = 0;
3695 memset(watermarks->plane[pipe], 0,
3696 sizeof(uint32_t) * 8 * I915_MAX_PLANES);
3697 memset(watermarks->cursor[pipe], 0, sizeof(uint32_t) * 8);
3698 memset(watermarks->plane_trans[pipe],
3699 0, sizeof(uint32_t) * I915_MAX_PLANES);
3700 watermarks->cursor_trans[pipe] = 0;
3701
3702 /* Clear ddb entries for pipe */
3703 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
3704 memset(&watermarks->ddb.plane[pipe], 0,
3705 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3706 memset(&watermarks->ddb.y_plane[pipe], 0,
3707 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3708 memset(&watermarks->ddb.cursor[pipe], 0, sizeof(struct skl_ddb_entry));
3709
3710}
3711
2d41c0b5
PB
3712static void skl_update_wm(struct drm_crtc *crtc)
3713{
3714 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3715 struct drm_device *dev = crtc->dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private;
3717 struct skl_pipe_wm_parameters params = {};
3718 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3719 struct skl_pipe_wm pipe_wm = {};
3720 struct intel_wm_config config = {};
3721
adda50b8
BP
3722
3723 /* Clear all dirty flags */
3724 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3725
3726 skl_clear_wm(results, intel_crtc->pipe);
2d41c0b5
PB
3727
3728 skl_compute_wm_global_parameters(dev, &config);
3729
3730 if (!skl_update_pipe_wm(crtc, &params, &config,
3731 &results->ddb, &pipe_wm))
3732 return;
3733
3734 skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
3735 results->dirty[intel_crtc->pipe] = true;
3736
3737 skl_update_other_pipe_wm(dev, crtc, &config, results);
3738 skl_write_wm_values(dev_priv, results);
0e8fb7ba 3739 skl_flush_wm_values(dev_priv, results);
53b0deb4
DL
3740
3741 /* store the new configuration */
3742 dev_priv->wm.skl_hw = *results;
2d41c0b5
PB
3743}
3744
3745static void
3746skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3747 uint32_t sprite_width, uint32_t sprite_height,
3748 int pixel_size, bool enabled, bool scaled)
3749{
3750 struct intel_plane *intel_plane = to_intel_plane(plane);
0fda6568 3751 struct drm_framebuffer *fb = plane->state->fb;
2d41c0b5
PB
3752
3753 intel_plane->wm.enabled = enabled;
3754 intel_plane->wm.scaled = scaled;
3755 intel_plane->wm.horiz_pixels = sprite_width;
3756 intel_plane->wm.vert_pixels = sprite_height;
0fda6568 3757 intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
2cd601c6
CK
3758
3759 /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
3760 intel_plane->wm.bytes_per_pixel =
3761 (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
3762 drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
3763 intel_plane->wm.y_bytes_per_pixel =
3764 (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
3765 drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
3766
0fda6568
TU
3767 /*
3768 * Framebuffer can be NULL on plane disable, but it does not
3769 * matter for watermarks if we assume no tiling in that case.
3770 */
3771 if (fb)
3772 intel_plane->wm.tiling = fb->modifier[0];
1fc0a8f7 3773 intel_plane->wm.rotation = plane->state->rotation;
2d41c0b5
PB
3774
3775 skl_update_wm(crtc);
3776}
3777
820c1980 3778static void ilk_update_wm(struct drm_crtc *crtc)
801bcfff 3779{
7c4a395f 3780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
46ba614c 3781 struct drm_device *dev = crtc->dev;
801bcfff 3782 struct drm_i915_private *dev_priv = dev->dev_private;
820c1980
ID
3783 struct ilk_wm_maximums max;
3784 struct ilk_pipe_wm_parameters params = {};
3785 struct ilk_wm_values results = {};
77c122bc 3786 enum intel_ddb_partitioning partitioning;
7c4a395f 3787 struct intel_pipe_wm pipe_wm = {};
198a1e9b 3788 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
a485bfb8 3789 struct intel_wm_config config = {};
7c4a395f 3790
2a44b76b 3791 ilk_compute_wm_parameters(crtc, &params);
7c4a395f
VS
3792
3793 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
3794
3795 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3796 return;
861f3389 3797
7c4a395f 3798 intel_crtc->wm.active = pipe_wm;
861f3389 3799
2a44b76b
VS
3800 ilk_compute_wm_config(dev, &config);
3801
34982fe1 3802 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
0ba22e26 3803 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
a485bfb8
VS
3804
3805 /* 5/6 split only in single pipe config on IVB+ */
ec98c8d1
VS
3806 if (INTEL_INFO(dev)->gen >= 7 &&
3807 config.num_pipes_active == 1 && config.sprites_enabled) {
34982fe1 3808 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
0ba22e26 3809 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
0362c781 3810
820c1980 3811 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
861f3389 3812 } else {
198a1e9b 3813 best_lp_wm = &lp_wm_1_2;
861f3389
PZ
3814 }
3815
198a1e9b 3816 partitioning = (best_lp_wm == &lp_wm_1_2) ?
77c122bc 3817 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
801bcfff 3818
820c1980 3819 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
609cedef 3820
820c1980 3821 ilk_write_wm_values(dev_priv, &results);
1011d8c4
PZ
3822}
3823
ed57cb8a
DL
3824static void
3825ilk_update_sprite_wm(struct drm_plane *plane,
3826 struct drm_crtc *crtc,
3827 uint32_t sprite_width, uint32_t sprite_height,
3828 int pixel_size, bool enabled, bool scaled)
526682e9 3829{
8553c18e 3830 struct drm_device *dev = plane->dev;
adf3d35e 3831 struct intel_plane *intel_plane = to_intel_plane(plane);
526682e9 3832
adf3d35e
VS
3833 intel_plane->wm.enabled = enabled;
3834 intel_plane->wm.scaled = scaled;
3835 intel_plane->wm.horiz_pixels = sprite_width;
ed57cb8a 3836 intel_plane->wm.vert_pixels = sprite_width;
adf3d35e 3837 intel_plane->wm.bytes_per_pixel = pixel_size;
526682e9 3838
8553c18e
VS
3839 /*
3840 * IVB workaround: must disable low power watermarks for at least
3841 * one frame before enabling scaling. LP watermarks can be re-enabled
3842 * when scaling is disabled.
3843 *
3844 * WaCxSRDisabledForSpriteScaling:ivb
3845 */
3846 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
3847 intel_wait_for_vblank(dev, intel_plane->pipe);
3848
820c1980 3849 ilk_update_wm(crtc);
526682e9
PZ
3850}
3851
3078999f
PB
3852static void skl_pipe_wm_active_state(uint32_t val,
3853 struct skl_pipe_wm *active,
3854 bool is_transwm,
3855 bool is_cursor,
3856 int i,
3857 int level)
3858{
3859 bool is_enabled = (val & PLANE_WM_EN) != 0;
3860
3861 if (!is_transwm) {
3862 if (!is_cursor) {
3863 active->wm[level].plane_en[i] = is_enabled;
3864 active->wm[level].plane_res_b[i] =
3865 val & PLANE_WM_BLOCKS_MASK;
3866 active->wm[level].plane_res_l[i] =
3867 (val >> PLANE_WM_LINES_SHIFT) &
3868 PLANE_WM_LINES_MASK;
3869 } else {
3870 active->wm[level].cursor_en = is_enabled;
3871 active->wm[level].cursor_res_b =
3872 val & PLANE_WM_BLOCKS_MASK;
3873 active->wm[level].cursor_res_l =
3874 (val >> PLANE_WM_LINES_SHIFT) &
3875 PLANE_WM_LINES_MASK;
3876 }
3877 } else {
3878 if (!is_cursor) {
3879 active->trans_wm.plane_en[i] = is_enabled;
3880 active->trans_wm.plane_res_b[i] =
3881 val & PLANE_WM_BLOCKS_MASK;
3882 active->trans_wm.plane_res_l[i] =
3883 (val >> PLANE_WM_LINES_SHIFT) &
3884 PLANE_WM_LINES_MASK;
3885 } else {
3886 active->trans_wm.cursor_en = is_enabled;
3887 active->trans_wm.cursor_res_b =
3888 val & PLANE_WM_BLOCKS_MASK;
3889 active->trans_wm.cursor_res_l =
3890 (val >> PLANE_WM_LINES_SHIFT) &
3891 PLANE_WM_LINES_MASK;
3892 }
3893 }
3894}
3895
3896static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3897{
3898 struct drm_device *dev = crtc->dev;
3899 struct drm_i915_private *dev_priv = dev->dev_private;
3900 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3901 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3902 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
3903 enum pipe pipe = intel_crtc->pipe;
3904 int level, i, max_level;
3905 uint32_t temp;
3906
3907 max_level = ilk_wm_max_level(dev);
3908
3909 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3910
3911 for (level = 0; level <= max_level; level++) {
3912 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3913 hw->plane[pipe][i][level] =
3914 I915_READ(PLANE_WM(pipe, i, level));
3915 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
3916 }
3917
3918 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3919 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3920 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
3921
3ef00284 3922 if (!intel_crtc->active)
3078999f
PB
3923 return;
3924
3925 hw->dirty[pipe] = true;
3926
3927 active->linetime = hw->wm_linetime[pipe];
3928
3929 for (level = 0; level <= max_level; level++) {
3930 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3931 temp = hw->plane[pipe][i][level];
3932 skl_pipe_wm_active_state(temp, active, false,
3933 false, i, level);
3934 }
3935 temp = hw->cursor[pipe][level];
3936 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3937 }
3938
3939 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3940 temp = hw->plane_trans[pipe][i];
3941 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3942 }
3943
3944 temp = hw->cursor_trans[pipe];
3945 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3946}
3947
3948void skl_wm_get_hw_state(struct drm_device *dev)
3949{
a269c583
DL
3950 struct drm_i915_private *dev_priv = dev->dev_private;
3951 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3078999f
PB
3952 struct drm_crtc *crtc;
3953
a269c583 3954 skl_ddb_get_hw_state(dev_priv, ddb);
3078999f
PB
3955 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3956 skl_pipe_wm_get_hw_state(crtc);
3957}
3958
243e6a44
VS
3959static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3960{
3961 struct drm_device *dev = crtc->dev;
3962 struct drm_i915_private *dev_priv = dev->dev_private;
820c1980 3963 struct ilk_wm_values *hw = &dev_priv->wm.hw;
243e6a44
VS
3964 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3965 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3966 enum pipe pipe = intel_crtc->pipe;
3967 static const unsigned int wm0_pipe_reg[] = {
3968 [PIPE_A] = WM0_PIPEA_ILK,
3969 [PIPE_B] = WM0_PIPEB_ILK,
3970 [PIPE_C] = WM0_PIPEC_IVB,
3971 };
3972
3973 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
a42a5719 3974 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ce0e0713 3975 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
243e6a44 3976
3ef00284 3977 active->pipe_enabled = intel_crtc->active;
2a44b76b
VS
3978
3979 if (active->pipe_enabled) {
243e6a44
VS
3980 u32 tmp = hw->wm_pipe[pipe];
3981
3982 /*
3983 * For active pipes LP0 watermark is marked as
 3984 * enabled, and LP1+ watermarks as disabled since
3985 * we can't really reverse compute them in case
3986 * multiple pipes are active.
3987 */
3988 active->wm[0].enable = true;
3989 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3990 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3991 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3992 active->linetime = hw->wm_linetime[pipe];
3993 } else {
3994 int level, max_level = ilk_wm_max_level(dev);
3995
3996 /*
3997 * For inactive pipes, all watermark levels
3998 * should be marked as enabled but zeroed,
3999 * which is what we'd compute them to.
4000 */
4001 for (level = 0; level <= max_level; level++)
4002 active->wm[level].enable = true;
4003 }
4004}
4005
6eb1a681
VS
4006#define _FW_WM(value, plane) \
4007 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4008#define _FW_WM_VLV(value, plane) \
4009 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
4010
4011static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4012 struct vlv_wm_values *wm)
4013{
4014 enum pipe pipe;
4015 uint32_t tmp;
4016
4017 for_each_pipe(dev_priv, pipe) {
4018 tmp = I915_READ(VLV_DDL(pipe));
4019
4020 wm->ddl[pipe].primary =
4021 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4022 wm->ddl[pipe].cursor =
4023 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4024 wm->ddl[pipe].sprite[0] =
4025 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4026 wm->ddl[pipe].sprite[1] =
4027 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4028 }
4029
4030 tmp = I915_READ(DSPFW1);
4031 wm->sr.plane = _FW_WM(tmp, SR);
4032 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
4033 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
4034 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
4035
4036 tmp = I915_READ(DSPFW2);
4037 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
4038 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
4039 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
4040
4041 tmp = I915_READ(DSPFW3);
4042 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4043
4044 if (IS_CHERRYVIEW(dev_priv)) {
4045 tmp = I915_READ(DSPFW7_CHV);
4046 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4047 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4048
4049 tmp = I915_READ(DSPFW8_CHV);
4050 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
4051 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
4052
4053 tmp = I915_READ(DSPFW9_CHV);
4054 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
4055 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
4056
4057 tmp = I915_READ(DSPHOWM);
4058 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4059 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4060 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4061 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
4062 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4063 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4064 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4065 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4066 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4067 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4068 } else {
4069 tmp = I915_READ(DSPFW7);
4070 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4071 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4072
4073 tmp = I915_READ(DSPHOWM);
4074 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4075 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4076 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4077 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4078 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4079 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4080 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4081 }
4082}
4083
4084#undef _FW_WM
4085#undef _FW_WM_VLV
4086
4087void vlv_wm_get_hw_state(struct drm_device *dev)
4088{
4089 struct drm_i915_private *dev_priv = to_i915(dev);
4090 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4091 struct intel_plane *plane;
4092 enum pipe pipe;
4093 u32 val;
4094
4095 vlv_read_wm_values(dev_priv, wm);
4096
4097 for_each_intel_plane(dev, plane) {
4098 switch (plane->base.type) {
4099 int sprite;
4100 case DRM_PLANE_TYPE_CURSOR:
4101 plane->wm.fifo_size = 63;
4102 break;
4103 case DRM_PLANE_TYPE_PRIMARY:
4104 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
4105 break;
4106 case DRM_PLANE_TYPE_OVERLAY:
4107 sprite = plane->plane;
4108 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
4109 break;
4110 }
4111 }
4112
4113 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4114 wm->level = VLV_WM_LEVEL_PM2;
4115
4116 if (IS_CHERRYVIEW(dev_priv)) {
4117 mutex_lock(&dev_priv->rps.hw_lock);
4118
4119 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4120 if (val & DSP_MAXFIFO_PM5_ENABLE)
4121 wm->level = VLV_WM_LEVEL_PM5;
4122
4123 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4124 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4125 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4126
4127 mutex_unlock(&dev_priv->rps.hw_lock);
4128 }
4129
4130 for_each_pipe(dev_priv, pipe)
4131 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4132 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4133 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4134
4135 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4136 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4137}
4138
243e6a44
VS
4139void ilk_wm_get_hw_state(struct drm_device *dev)
4140{
4141 struct drm_i915_private *dev_priv = dev->dev_private;
820c1980 4142 struct ilk_wm_values *hw = &dev_priv->wm.hw;
243e6a44
VS
4143 struct drm_crtc *crtc;
4144
70e1e0ec 4145 for_each_crtc(dev, crtc)
243e6a44
VS
4146 ilk_pipe_wm_get_hw_state(crtc);
4147
4148 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4149 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4150 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4151
4152 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
cfa7698b
VS
4153 if (INTEL_INFO(dev)->gen >= 7) {
4154 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4155 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4156 }
243e6a44 4157
a42a5719 4158 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ac9545fd
VS
4159 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4160 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4161 else if (IS_IVYBRIDGE(dev))
4162 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4163 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
243e6a44
VS
4164
4165 hw->enable_fbc_wm =
4166 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4167}
4168
b445e3b0
ED
4169/**
4170 * intel_update_watermarks - update FIFO watermark values based on current modes
4171 *
4172 * Calculate watermark values for the various WM regs based on current mode
4173 * and plane configuration.
4174 *
4175 * There are several cases to deal with here:
4176 * - normal (i.e. non-self-refresh)
4177 * - self-refresh (SR) mode
4178 * - lines are large relative to FIFO size (buffer can hold up to 2)
4179 * - lines are small relative to FIFO size (buffer can hold more than 2
4180 * lines), so need to account for TLB latency
4181 *
4182 * The normal calculation is:
4183 * watermark = dotclock * bytes per pixel * latency
4184 * where latency is platform & configuration dependent (we assume pessimal
4185 * values here).
4186 *
4187 * The SR calculation is:
4188 * watermark = (trunc(latency/line time)+1) * surface width *
4189 * bytes per pixel
4190 * where
4191 * line time = htotal / dotclock
4192 * surface width = hdisplay for normal plane and 64 for cursor
4193 * and latency is assumed to be high, as above.
4194 *
4195 * The final value programmed to the register should always be rounded up,
4196 * and include an extra 2 entries to account for clock crossings.
4197 *
4198 * We don't use the sprite, so we can ignore that. And on Crestline we have
4199 * to set the non-SR watermarks to 8.
4200 */
46ba614c 4201void intel_update_watermarks(struct drm_crtc *crtc)
b445e3b0 4202{
46ba614c 4203 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
b445e3b0
ED
4204
4205 if (dev_priv->display.update_wm)
46ba614c 4206 dev_priv->display.update_wm(crtc);
b445e3b0
ED
4207}
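/*
 * Illustrative SR calculation (assumed numbers): with htotal = 2200,
 * dotclock = 148.5 MHz and latency = 12 us, line time = 2200 / 148.5 MHz
 * ~= 14.8 us, so watermark = (trunc(12 / 14.8) + 1) * hdisplay * bytes per
 * pixel = 1 * hdisplay * bytes per pixel, rounded up and padded by the two
 * extra entries for clock crossings noted above.
 */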
4208
adf3d35e
VS
4209void intel_update_sprite_watermarks(struct drm_plane *plane,
4210 struct drm_crtc *crtc,
ed57cb8a
DL
4211 uint32_t sprite_width,
4212 uint32_t sprite_height,
4213 int pixel_size,
39db4a4d 4214 bool enabled, bool scaled)
b445e3b0 4215{
adf3d35e 4216 struct drm_i915_private *dev_priv = plane->dev->dev_private;
b445e3b0
ED
4217
4218 if (dev_priv->display.update_sprite_wm)
ed57cb8a
DL
4219 dev_priv->display.update_sprite_wm(plane, crtc,
4220 sprite_width, sprite_height,
39db4a4d 4221 pixel_size, enabled, scaled);
b445e3b0
ED
4222}
4223
9270388e
DV
4224/**
4225 * Lock protecting IPS related data structures
9270388e
DV
4226 */
4227DEFINE_SPINLOCK(mchdev_lock);
4228
4229/* Global for IPS driver to get at the current i915 device. Protected by
4230 * mchdev_lock. */
4231static struct drm_i915_private *i915_mch_dev;
4232
2b4e57bd
ED
4233bool ironlake_set_drps(struct drm_device *dev, u8 val)
4234{
4235 struct drm_i915_private *dev_priv = dev->dev_private;
4236 u16 rgvswctl;
4237
9270388e
DV
4238 assert_spin_locked(&mchdev_lock);
4239
2b4e57bd
ED
4240 rgvswctl = I915_READ16(MEMSWCTL);
4241 if (rgvswctl & MEMCTL_CMD_STS) {
4242 DRM_DEBUG("gpu busy, RCS change rejected\n");
4243 return false; /* still busy with another command */
4244 }
4245
4246 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4247 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4248 I915_WRITE16(MEMSWCTL, rgvswctl);
4249 POSTING_READ16(MEMSWCTL);
4250
4251 rgvswctl |= MEMCTL_CMD_STS;
4252 I915_WRITE16(MEMSWCTL, rgvswctl);
4253
4254 return true;
4255}
4256
8090c6b9 4257static void ironlake_enable_drps(struct drm_device *dev)
2b4e57bd
ED
4258{
4259 struct drm_i915_private *dev_priv = dev->dev_private;
4260 u32 rgvmodectl = I915_READ(MEMMODECTL);
4261 u8 fmax, fmin, fstart, vstart;
4262
9270388e
DV
4263 spin_lock_irq(&mchdev_lock);
4264
2b4e57bd
ED
4265 /* Enable temp reporting */
4266 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4267 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4268
4269 /* 100ms RC evaluation intervals */
4270 I915_WRITE(RCUPEI, 100000);
4271 I915_WRITE(RCDNEI, 100000);
4272
4273 /* Set max/min thresholds to 90ms and 80ms respectively */
4274 I915_WRITE(RCBMAXAVG, 90000);
4275 I915_WRITE(RCBMINAVG, 80000);
4276
4277 I915_WRITE(MEMIHYST, 1);
4278
4279 /* Set up min, max, and cur for interrupt handling */
4280 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4281 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4282 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4283 MEMMODE_FSTART_SHIFT;
4284
4285 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4286 PXVFREQ_PX_SHIFT;
4287
20e4d407
DV
4288 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4289 dev_priv->ips.fstart = fstart;
2b4e57bd 4290
20e4d407
DV
4291 dev_priv->ips.max_delay = fstart;
4292 dev_priv->ips.min_delay = fmin;
4293 dev_priv->ips.cur_delay = fstart;
2b4e57bd
ED
4294
4295 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4296 fmax, fmin, fstart);
4297
4298 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4299
4300 /*
4301 * Interrupts will be enabled in ironlake_irq_postinstall
4302 */
4303
4304 I915_WRITE(VIDSTART, vstart);
4305 POSTING_READ(VIDSTART);
4306
4307 rgvmodectl |= MEMMODE_SWMODE_EN;
4308 I915_WRITE(MEMMODECTL, rgvmodectl);
4309
9270388e 4310 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2b4e57bd 4311 DRM_ERROR("stuck trying to change perf mode\n");
dd92d8de 4312 mdelay(1);
2b4e57bd
ED
4313
4314 ironlake_set_drps(dev, fstart);
4315
20e4d407 4316 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2b4e57bd 4317 I915_READ(0x112e0);
20e4d407
DV
4318 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4319 dev_priv->ips.last_count2 = I915_READ(0x112f4);
5ed0bdf2 4320 dev_priv->ips.last_time2 = ktime_get_raw_ns();
9270388e
DV
4321
4322 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
4323}
4324
8090c6b9 4325static void ironlake_disable_drps(struct drm_device *dev)
2b4e57bd
ED
4326{
4327 struct drm_i915_private *dev_priv = dev->dev_private;
9270388e
DV
4328 u16 rgvswctl;
4329
4330 spin_lock_irq(&mchdev_lock);
4331
4332 rgvswctl = I915_READ16(MEMSWCTL);
2b4e57bd
ED
4333
4334 /* Ack interrupts, disable EFC interrupt */
4335 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4336 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4337 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4338 I915_WRITE(DEIIR, DE_PCU_EVENT);
4339 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4340
4341 /* Go back to the starting frequency */
20e4d407 4342 ironlake_set_drps(dev, dev_priv->ips.fstart);
dd92d8de 4343 mdelay(1);
2b4e57bd
ED
4344 rgvswctl |= MEMCTL_CMD_STS;
4345 I915_WRITE(MEMSWCTL, rgvswctl);
dd92d8de 4346 mdelay(1);
2b4e57bd 4347
9270388e 4348 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
4349}
4350
acbe9475
DV
4351/* There's a funny hw issue where the hw returns all 0 when reading from
4352 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 4353 * ourselves, instead of doing an rmw cycle (which might result in us clearing
 4354 * all limits and the gpu getting stuck at whatever frequency it is at atm).
4355 */
74ef1173 4356static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2b4e57bd 4357{
7b9e0ae6 4358 u32 limits;
2b4e57bd 4359
20b46e59
DV
4360 /* Only set the down limit when we've reached the lowest level to avoid
4361 * getting more interrupts, otherwise leave this clear. This prevents a
4362 * race in the hw when coming out of rc6: There's a tiny window where
4363 * the hw runs at the minimal clock before selecting the desired
 4364 * frequency; if the down threshold expires in that window we will not
4365 * receive a down interrupt. */
74ef1173
AG
4366 if (IS_GEN9(dev_priv->dev)) {
4367 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4368 if (val <= dev_priv->rps.min_freq_softlimit)
4369 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4370 } else {
4371 limits = dev_priv->rps.max_freq_softlimit << 24;
4372 if (val <= dev_priv->rps.min_freq_softlimit)
4373 limits |= dev_priv->rps.min_freq_softlimit << 16;
4374 }
20b46e59
DV
4375
4376 return limits;
4377}
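/*
 * Illustrative packing (assumed softlimits): pre-gen9 with
 * max_freq_softlimit = 0x20 and val above the minimum gives limits =
 * 0x20 << 24; once val reaches min_freq_softlimit = 0x08 the down limit
 * 0x08 << 16 is OR'ed in as well. Gen9 uses bit positions 23 and 14 instead.
 */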
4378
dd75fdc8
CW
4379static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4380{
4381 int new_power;
8a586437
AG
4382 u32 threshold_up = 0, threshold_down = 0; /* in % */
4383 u32 ei_up = 0, ei_down = 0;
dd75fdc8
CW
4384
4385 new_power = dev_priv->rps.power;
4386 switch (dev_priv->rps.power) {
4387 case LOW_POWER:
b39fb297 4388 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
dd75fdc8
CW
4389 new_power = BETWEEN;
4390 break;
4391
4392 case BETWEEN:
b39fb297 4393 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
dd75fdc8 4394 new_power = LOW_POWER;
b39fb297 4395 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
dd75fdc8
CW
4396 new_power = HIGH_POWER;
4397 break;
4398
4399 case HIGH_POWER:
b39fb297 4400 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
dd75fdc8
CW
4401 new_power = BETWEEN;
4402 break;
4403 }
4404 /* Max/min bins are special */
aed242ff 4405 if (val <= dev_priv->rps.min_freq_softlimit)
dd75fdc8 4406 new_power = LOW_POWER;
aed242ff 4407 if (val >= dev_priv->rps.max_freq_softlimit)
dd75fdc8
CW
4408 new_power = HIGH_POWER;
4409 if (new_power == dev_priv->rps.power)
4410 return;
4411
4412 /* Note the units here are not exactly 1us, but 1280ns. */
4413 switch (new_power) {
4414 case LOW_POWER:
4415 /* Upclock if more than 95% busy over 16ms */
8a586437
AG
4416 ei_up = 16000;
4417 threshold_up = 95;
dd75fdc8
CW
4418
4419 /* Downclock if less than 85% busy over 32ms */
8a586437
AG
4420 ei_down = 32000;
4421 threshold_down = 85;
dd75fdc8
CW
4422 break;
4423
4424 case BETWEEN:
4425 /* Upclock if more than 90% busy over 13ms */
8a586437
AG
4426 ei_up = 13000;
4427 threshold_up = 90;
dd75fdc8
CW
4428
4429 /* Downclock if less than 75% busy over 32ms */
8a586437
AG
4430 ei_down = 32000;
4431 threshold_down = 75;
dd75fdc8
CW
4432 break;
4433
4434 case HIGH_POWER:
4435 /* Upclock if more than 85% busy over 10ms */
8a586437
AG
4436 ei_up = 10000;
4437 threshold_up = 85;
dd75fdc8
CW
4438
4439 /* Downclock if less than 60% busy over 32ms */
8a586437
AG
4440 ei_down = 32000;
4441 threshold_down = 60;
dd75fdc8
CW
4442 break;
4443 }
4444
8a586437
AG
4445 I915_WRITE(GEN6_RP_UP_EI,
4446 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4447 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4448 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
4449
4450 I915_WRITE(GEN6_RP_DOWN_EI,
4451 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4452 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4453 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
4454
4455 I915_WRITE(GEN6_RP_CONTROL,
4456 GEN6_RP_MEDIA_TURBO |
4457 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4458 GEN6_RP_MEDIA_IS_GFX |
4459 GEN6_RP_ENABLE |
4460 GEN6_RP_UP_BUSY_AVG |
4461 GEN6_RP_DOWN_IDLE_AVG);
4462
dd75fdc8 4463 dev_priv->rps.power = new_power;
8fb55197
CW
4464 dev_priv->rps.up_threshold = threshold_up;
4465 dev_priv->rps.down_threshold = threshold_down;
dd75fdc8
CW
4466 dev_priv->rps.last_adj = 0;
4467}
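/*
 * Illustrative example: in LOW_POWER the up threshold written is
 * GT_INTERVAL_FROM_US(dev_priv, 16000 * 95 / 100), i.e. 95% of the 16 ms
 * up evaluation interval, and the down threshold is 85% of the 32 ms
 * down interval.
 */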
4468
2876ce73
CW
4469static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4470{
4471 u32 mask = 0;
4472
4473 if (val > dev_priv->rps.min_freq_softlimit)
6f4b12f8 4474 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
2876ce73 4475 if (val < dev_priv->rps.max_freq_softlimit)
6f4b12f8 4476 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
2876ce73 4477
7b3c29f6
CW
4478 mask &= dev_priv->pm_rps_events;
4479
59d02a1f 4480 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
2876ce73
CW
4481}
4482
b8a5ff8d
JM
4483/* gen6_set_rps is called to update the frequency request, but should also be
4484 * called when the range (min_delay and max_delay) is modified so that we can
4485 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
ffe02b40 4486static void gen6_set_rps(struct drm_device *dev, u8 val)
20b46e59
DV
4487{
4488 struct drm_i915_private *dev_priv = dev->dev_private;
7b9e0ae6 4489
23eafea6
SAK
4490 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4491 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
4492 return;
4493
4fc688ce 4494 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
aed242ff
CW
4495 WARN_ON(val > dev_priv->rps.max_freq);
4496 WARN_ON(val < dev_priv->rps.min_freq);
004777cb 4497
eb64cad1
CW
4498 /* min/max delay may still have been modified so be sure to
4499 * write the limits value.
4500 */
4501 if (val != dev_priv->rps.cur_freq) {
4502 gen6_set_rps_thresholds(dev_priv, val);
b8a5ff8d 4503
5704195c
AG
4504 if (IS_GEN9(dev))
4505 I915_WRITE(GEN6_RPNSWREQ,
4506 GEN9_FREQUENCY(val));
4507 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
eb64cad1
CW
4508 I915_WRITE(GEN6_RPNSWREQ,
4509 HSW_FREQUENCY(val));
4510 else
4511 I915_WRITE(GEN6_RPNSWREQ,
4512 GEN6_FREQUENCY(val) |
4513 GEN6_OFFSET(0) |
4514 GEN6_AGGRESSIVE_TURBO);
b8a5ff8d 4515 }
7b9e0ae6 4516
7b9e0ae6
CW
4517 /* Make sure we continue to get interrupts
4518 * until we hit the minimum or maximum frequencies.
4519 */
74ef1173 4520 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
2876ce73 4521 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
7b9e0ae6 4522
d5570a72
BW
4523 POSTING_READ(GEN6_RPNSWREQ);
4524
b39fb297 4525 dev_priv->rps.cur_freq = val;
be2cde9a 4526 trace_intel_gpu_freq_change(val * 50);
2b4e57bd
ED
4527}
4528
ffe02b40
VS
4529static void valleyview_set_rps(struct drm_device *dev, u8 val)
4530{
4531 struct drm_i915_private *dev_priv = dev->dev_private;
4532
4533 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
aed242ff
CW
4534 WARN_ON(val > dev_priv->rps.max_freq);
4535 WARN_ON(val < dev_priv->rps.min_freq);
ffe02b40
VS
4536
4537 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4538 "Odd GPU freq value\n"))
4539 val &= ~1;
4540
cd25dd5b
D
4541 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4542
8fb55197 4543 if (val != dev_priv->rps.cur_freq) {
ffe02b40 4544 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
8fb55197
CW
4545 if (!IS_CHERRYVIEW(dev_priv))
4546 gen6_set_rps_thresholds(dev_priv, val);
4547 }
ffe02b40 4548
ffe02b40
VS
4549 dev_priv->rps.cur_freq = val;
4550 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4551}
4552
a7f6e231 4553/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
76c3552f
D
4554 *
 4555 * If Gfx is Idle, then
a7f6e231
D
4556 * 1. Forcewake Media well.
4557 * 2. Request idle freq.
4558 * 3. Release Forcewake of Media well.
76c3552f
D
 4559 */
4560static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4561{
aed242ff 4562 u32 val = dev_priv->rps.idle_freq;
5549d25f 4563
aed242ff 4564 if (dev_priv->rps.cur_freq <= val)
76c3552f
D
4565 return;
4566
a7f6e231
D
4567 /* Wake up the media well, as that takes a lot less
4568 * power than the Render well. */
4569 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4570 valleyview_set_rps(dev_priv->dev, val);
4571 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
76c3552f
D
4572}
4573
43cf3bf0
CW
4574void gen6_rps_busy(struct drm_i915_private *dev_priv)
4575{
4576 mutex_lock(&dev_priv->rps.hw_lock);
4577 if (dev_priv->rps.enabled) {
4578 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4579 gen6_rps_reset_ei(dev_priv);
4580 I915_WRITE(GEN6_PMINTRMSK,
4581 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4582 }
4583 mutex_unlock(&dev_priv->rps.hw_lock);
4584}
4585
b29c19b6
CW
4586void gen6_rps_idle(struct drm_i915_private *dev_priv)
4587{
691bb717
DL
4588 struct drm_device *dev = dev_priv->dev;
4589
b29c19b6 4590 mutex_lock(&dev_priv->rps.hw_lock);
c0951f0c 4591 if (dev_priv->rps.enabled) {
21a11fff 4592 if (IS_VALLEYVIEW(dev))
76c3552f 4593 vlv_set_rps_idle(dev_priv);
7526ed79 4594 else
aed242ff 4595 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
c0951f0c 4596 dev_priv->rps.last_adj = 0;
43cf3bf0 4597 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
c0951f0c 4598 }
8d3afd7d 4599 mutex_unlock(&dev_priv->rps.hw_lock);
1854d5ca 4600
8d3afd7d 4601 spin_lock(&dev_priv->rps.client_lock);
1854d5ca
CW
4602 while (!list_empty(&dev_priv->rps.clients))
4603 list_del_init(dev_priv->rps.clients.next);
8d3afd7d 4604 spin_unlock(&dev_priv->rps.client_lock);
b29c19b6
CW
4605}
4606
1854d5ca 4607void gen6_rps_boost(struct drm_i915_private *dev_priv,
e61b9958
CW
4608 struct intel_rps_client *rps,
4609 unsigned long submitted)
b29c19b6 4610{
8d3afd7d
CW
4611 /* This is intentionally racy! We peek at the state here, then
4612 * validate inside the RPS worker.
4613 */
4614 if (!(dev_priv->mm.busy &&
4615 dev_priv->rps.enabled &&
4616 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
4617 return;
43cf3bf0 4618
e61b9958
CW
4619 /* Force a RPS boost (and don't count it against the client) if
4620 * the GPU is severely congested.
4621 */
d0bc54f2 4622 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
e61b9958
CW
4623 rps = NULL;
4624
8d3afd7d
CW
4625 spin_lock(&dev_priv->rps.client_lock);
4626 if (rps == NULL || list_empty(&rps->link)) {
4627 spin_lock_irq(&dev_priv->irq_lock);
4628 if (dev_priv->rps.interrupts_enabled) {
4629 dev_priv->rps.client_boost = true;
4630 queue_work(dev_priv->wq, &dev_priv->rps.work);
4631 }
4632 spin_unlock_irq(&dev_priv->irq_lock);
1854d5ca 4633
2e1b8730
CW
4634 if (rps != NULL) {
4635 list_add(&rps->link, &dev_priv->rps.clients);
4636 rps->boosts++;
1854d5ca
CW
4637 } else
4638 dev_priv->rps.boosts++;
c0951f0c 4639 }
8d3afd7d 4640 spin_unlock(&dev_priv->rps.client_lock);
b29c19b6
CW
4641}
4642
ffe02b40 4643void intel_set_rps(struct drm_device *dev, u8 val)
0a073b84 4644{
ffe02b40
VS
4645 if (IS_VALLEYVIEW(dev))
4646 valleyview_set_rps(dev, val);
4647 else
4648 gen6_set_rps(dev, val);
0a073b84
JB
4649}
4650
20e49366
ZW
4651static void gen9_disable_rps(struct drm_device *dev)
4652{
4653 struct drm_i915_private *dev_priv = dev->dev_private;
4654
4655 I915_WRITE(GEN6_RC_CONTROL, 0);
38c23527 4656 I915_WRITE(GEN9_PG_ENABLE, 0);
20e49366
ZW
4657}
4658
44fc7d5c 4659static void gen6_disable_rps(struct drm_device *dev)
d20d4f0c
JB
4660{
4661 struct drm_i915_private *dev_priv = dev->dev_private;
4662
4663 I915_WRITE(GEN6_RC_CONTROL, 0);
44fc7d5c 4664 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
44fc7d5c
DV
4665}
4666
38807746
D
4667static void cherryview_disable_rps(struct drm_device *dev)
4668{
4669 struct drm_i915_private *dev_priv = dev->dev_private;
4670
4671 I915_WRITE(GEN6_RC_CONTROL, 0);
4672}
4673
44fc7d5c
DV
4674static void valleyview_disable_rps(struct drm_device *dev)
4675{
4676 struct drm_i915_private *dev_priv = dev->dev_private;
4677
98a2e5f9
D
 4678 /* We're doing forcewake before disabling RC6;
 4679 * this is what the BIOS expects when going into suspend */
59bad947 4680 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
98a2e5f9 4681
44fc7d5c 4682 I915_WRITE(GEN6_RC_CONTROL, 0);
d20d4f0c 4683
59bad947 4684 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d20d4f0c
JB
4685}
4686
dc39fff7
BW
4687static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4688{
91ca689a
ID
4689 if (IS_VALLEYVIEW(dev)) {
4690 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4691 mode = GEN6_RC_CTL_RC6_ENABLE;
4692 else
4693 mode = 0;
4694 }
58abf1da
RV
4695 if (HAS_RC6p(dev))
4696 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4697 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
4698 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
4699 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
4700
4701 else
4702 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4703 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
dc39fff7
BW
4704}
4705
e6069ca8 4706static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
2b4e57bd 4707{
e7d66d89
DV
4708 /* No RC6 before Ironlake and code is gone for ilk. */
4709 if (INTEL_INFO(dev)->gen < 6)
e6069ca8
ID
4710 return 0;
4711
456470eb 4712 /* Respect the kernel parameter if it is set */
e6069ca8
ID
4713 if (enable_rc6 >= 0) {
4714 int mask;
4715
58abf1da 4716 if (HAS_RC6p(dev))
e6069ca8
ID
4717 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4718 INTEL_RC6pp_ENABLE;
4719 else
4720 mask = INTEL_RC6_ENABLE;
4721
4722 if ((enable_rc6 & mask) != enable_rc6)
8dfd1f04
DV
4723 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4724 enable_rc6 & mask, enable_rc6, mask);
e6069ca8
ID
4725
4726 return enable_rc6 & mask;
4727 }
2b4e57bd 4728
8bade1ad 4729 if (IS_IVYBRIDGE(dev))
cca84a1f 4730 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8bade1ad
BW
4731
4732 return INTEL_RC6_ENABLE;
2b4e57bd
ED
4733}
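/*
 * Illustrative example: with i915.enable_rc6 = 3 on a part with RC6p the
 * valid mask is RC6 | RC6p | RC6pp = 7, so 3 is returned unchanged; the
 * same request on a part without RC6p is trimmed to RC6 only (1) after
 * the debug message above.
 */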
4734
e6069ca8
ID
4735int intel_enable_rc6(const struct drm_device *dev)
4736{
4737 return i915.enable_rc6;
4738}
4739
93ee2920 4740static void gen6_init_rps_frequencies(struct drm_device *dev)
3280e8b0 4741{
93ee2920
TR
4742 struct drm_i915_private *dev_priv = dev->dev_private;
4743 uint32_t rp_state_cap;
4744 u32 ddcc_status = 0;
4745 int ret;
4746
3280e8b0
BW
4747 /* All of these values are in units of 50MHz */
4748 dev_priv->rps.cur_freq = 0;
93ee2920 4749 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
35040562
BP
4750 if (IS_BROXTON(dev)) {
4751 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4752 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4753 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4754 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
4755 } else {
4756 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4757 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4758 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4759 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4760 }
4761
3280e8b0
BW
4762 /* hw_max = RP0 until we check for overclocking */
4763 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4764
93ee2920 4765 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
c5e0688c 4766 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
93ee2920
TR
4767 ret = sandybridge_pcode_read(dev_priv,
4768 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4769 &ddcc_status);
4770 if (0 == ret)
4771 dev_priv->rps.efficient_freq =
46efa4ab
TR
4772 clamp_t(u8,
4773 ((ddcc_status >> 8) & 0xff),
4774 dev_priv->rps.min_freq,
4775 dev_priv->rps.max_freq);
93ee2920
TR
4776 }
4777
c5e0688c
AG
4778 if (IS_SKYLAKE(dev)) {
 4779 /* Store the frequency values in 16.66 MHz units, which is
4780 the natural hardware unit for SKL */
4781 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4782 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4783 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4784 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
4785 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
4786 }
4787
aed242ff
CW
4788 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4789
3280e8b0
BW
4790 /* Preserve min/max settings in case of re-init */
4791 if (dev_priv->rps.max_freq_softlimit == 0)
4792 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4793
93ee2920
TR
4794 if (dev_priv->rps.min_freq_softlimit == 0) {
4795 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4796 dev_priv->rps.min_freq_softlimit =
813b5e69
VS
4797 max_t(int, dev_priv->rps.efficient_freq,
4798 intel_freq_opcode(dev_priv, 450));
93ee2920
TR
4799 else
4800 dev_priv->rps.min_freq_softlimit =
4801 dev_priv->rps.min_freq;
4802 }
3280e8b0
BW
4803}
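/*
 * Illustrative decode (assumed register value): on a non-BXT part,
 * rp_state_cap = 0x000b0d16 yields rp0_freq = 0x16, rp1_freq = 0x0d and
 * min_freq = 0x0b, all in 50 MHz units; Broxton reads the same fields in
 * the opposite byte order and Skylake rescales them by GEN9_FREQ_SCALER.
 */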
4804
b6fef0ef 4805/* See the Gen9_GT_PM_Programming_Guide doc for the below */
20e49366 4806static void gen9_enable_rps(struct drm_device *dev)
b6fef0ef
JB
4807{
4808 struct drm_i915_private *dev_priv = dev->dev_private;
4809
4810 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4811
ba1c554c
DL
4812 gen6_init_rps_frequencies(dev);
4813
23eafea6
SAK
4814 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4815 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
4816 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4817 return;
4818 }
4819
0beb059a
AG
 4820 /* Program defaults and thresholds for RPS */
4821 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4822 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
4823
 4824 /* 1 second timeout */
4825 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
4826 GT_INTERVAL_FROM_US(dev_priv, 1000000));
4827
b6fef0ef 4828 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
b6fef0ef 4829
0beb059a
AG
4830 /* Leaning on the below call to gen6_set_rps to program/setup the
4831 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4832 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4833 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4834 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
b6fef0ef
JB
4835
4836 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4837}
4838
4839static void gen9_enable_rc6(struct drm_device *dev)
20e49366
ZW
4840{
4841 struct drm_i915_private *dev_priv = dev->dev_private;
4842 struct intel_engine_cs *ring;
4843 uint32_t rc6_mask = 0;
4844 int unused;
4845
4846 /* 1a: Software RC state - RC0 */
4847 I915_WRITE(GEN6_RC_STATE, 0);
4848
4849 /* 1b: Get forcewake during program sequence. Although the driver
 4850 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
59bad947 4851 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
20e49366
ZW
4852
4853 /* 2a: Disable RC states. */
4854 I915_WRITE(GEN6_RC_CONTROL, 0);
4855
4856 /* 2b: Program RC6 thresholds.*/
4857 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4858 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4859 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4860 for_each_ring(ring, dev_priv, unused)
4861 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4862 I915_WRITE(GEN6_RC_SLEEP, 0);
4863 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4864
38c23527
ZW
4865 /* 2c: Program Coarse Power Gating Policies. */
4866 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4867 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4868
20e49366
ZW
4869 /* 3a: Enable RC6 */
4870 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4871 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4872 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4873 "on" : "off");
4874 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4875 GEN6_RC_CTL_EI_MODE(1) |
4876 rc6_mask);
4877
cb07bae0
SK
4878 /*
4879 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
f2d2fe95 4880 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
cb07bae0 4881 */
f2d2fe95
SAK
4882 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
4883 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
4884 I915_WRITE(GEN9_PG_ENABLE, 0);
4885 else
4886 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4887 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
38c23527 4888
59bad947 4889 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
20e49366
ZW
4890
4891}
4892
6edee7f3
BW
4893static void gen8_enable_rps(struct drm_device *dev)
4894{
4895 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 4896 struct intel_engine_cs *ring;
93ee2920 4897 uint32_t rc6_mask = 0;
6edee7f3
BW
4898 int unused;
4899
4900 /* 1a: Software RC state - RC0 */
4901 I915_WRITE(GEN6_RC_STATE, 0);
4902
4903 /* 1c & 1d: Get forcewake during program sequence. Although the driver
 4904 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
59bad947 4905 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6edee7f3
BW
4906
4907 /* 2a: Disable RC states. */
4908 I915_WRITE(GEN6_RC_CONTROL, 0);
4909
93ee2920
TR
4910 /* Initialize rps frequencies */
4911 gen6_init_rps_frequencies(dev);
6edee7f3
BW
4912
4913 /* 2b: Program RC6 thresholds.*/
4914 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4915 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4916 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4917 for_each_ring(ring, dev_priv, unused)
4918 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4919 I915_WRITE(GEN6_RC_SLEEP, 0);
0d68b25e
TR
4920 if (IS_BROADWELL(dev))
4921 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4922 else
4923 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
6edee7f3
BW
4924
4925 /* 3: Enable RC6 */
4926 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4927 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
abbf9d2c 4928 intel_print_rc6_info(dev, rc6_mask);
0d68b25e
TR
4929 if (IS_BROADWELL(dev))
4930 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4931 GEN7_RC_CTL_TO_MODE |
4932 rc6_mask);
4933 else
4934 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4935 GEN6_RC_CTL_EI_MODE(1) |
4936 rc6_mask);
6edee7f3
BW
4937
4938 /* 4 Program defaults and thresholds for RPS*/
f9bdc585
BW
4939 I915_WRITE(GEN6_RPNSWREQ,
4940 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4941 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4942 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
7526ed79
DV
4943 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4944 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
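/*
 * Worked example (illustrative only, assuming the 1.28 us register
 * granularity the neighbouring comments imply): 100000000 / 128 = 781250
 * units, and 781250 * 1.28 us = 1.0 s. Writing the documented 1000000
 * instead would give 1000000 * 1.28 us = 1.28 s, which is why the NB
 * above calls the two values not equivalent.
 */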
4945
4946 /* Docs recommend 900MHz, and 300 MHz respectively */
4947 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4948 dev_priv->rps.max_freq_softlimit << 24 |
4949 dev_priv->rps.min_freq_softlimit << 16);
4950
4951 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4952 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
4953 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4954 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
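/*
 * Where the percentages above come from (illustrative arithmetic, again
 * assuming 1.28 us units): up EI = 66000 * 1.28 us = 84.48 ms with an up
 * threshold of 7600000 / 128 = 59375 units = 76 ms, i.e. ~90% busyness;
 * down EI = 350000 * 1.28 us = 448 ms against a ~313 ms threshold,
 * i.e. ~70% busyness.
 */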
4955
4956 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6edee7f3
BW
4957
4958 /* 5: Enable RPS */
7526ed79
DV
4959 I915_WRITE(GEN6_RP_CONTROL,
4960 GEN6_RP_MEDIA_TURBO |
4961 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4962 GEN6_RP_MEDIA_IS_GFX |
4963 GEN6_RP_ENABLE |
4964 GEN6_RP_UP_BUSY_AVG |
4965 GEN6_RP_DOWN_IDLE_AVG);
4966
4967 /* 6: Ring frequency + overclocking (our driver does this later) */
4968
c7f3153a 4969 dev_priv->rps.power = HIGH_POWER; /* force a reset */
aed242ff 4970 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
7526ed79 4971
59bad947 4972 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6edee7f3
BW
4973}
4974
79f5b2c7 4975static void gen6_enable_rps(struct drm_device *dev)
2b4e57bd 4976{
79f5b2c7 4977 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 4978 struct intel_engine_cs *ring;
d060c169 4979 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
2b4e57bd 4980 u32 gtfifodbg;
2b4e57bd 4981 int rc6_mode;
42c0526c 4982 int i, ret;
2b4e57bd 4983
4fc688ce 4984 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 4985
2b4e57bd
ED
4986 /* Here begins a magic sequence of register writes to enable
4987 * auto-downclocking.
4988 *
4989 * Perhaps there might be some value in exposing these to
4990 * userspace...
4991 */
4992 I915_WRITE(GEN6_RC_STATE, 0);
2b4e57bd
ED
4993
4994 /* Clear the DBG now so stale errors aren't mistaken for new ones */
4995 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4996 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4997 I915_WRITE(GTFIFODBG, gtfifodbg);
4998 }
4999
59bad947 5000 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2b4e57bd 5001
93ee2920
TR
5002 /* Initialize rps frequencies */
5003 gen6_init_rps_frequencies(dev);
dd0a1aa1 5004
2b4e57bd
ED
5005 /* disable the counters and set deterministic thresholds */
5006 I915_WRITE(GEN6_RC_CONTROL, 0);
5007
5008 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5009 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5010 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5011 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5012 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5013
b4519513
CW
5014 for_each_ring(ring, dev_priv, i)
5015 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2b4e57bd
ED
5016
5017 I915_WRITE(GEN6_RC_SLEEP, 0);
5018 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
29c78f60 5019 if (IS_IVYBRIDGE(dev))
351aa566
SM
5020 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5021 else
5022 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
0920a487 5023 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
2b4e57bd
ED
5024 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5025
5a7dc92a 5026 /* Check if we are enabling RC6 */
2b4e57bd
ED
5027 rc6_mode = intel_enable_rc6(dev_priv->dev);
5028 if (rc6_mode & INTEL_RC6_ENABLE)
5029 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5030
5a7dc92a
ED
5031 /* We don't use those on Haswell */
5032 if (!IS_HASWELL(dev)) {
5033 if (rc6_mode & INTEL_RC6p_ENABLE)
5034 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2b4e57bd 5035
5a7dc92a
ED
5036 if (rc6_mode & INTEL_RC6pp_ENABLE)
5037 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5038 }
2b4e57bd 5039
dc39fff7 5040 intel_print_rc6_info(dev, rc6_mask);
2b4e57bd
ED
5041
5042 I915_WRITE(GEN6_RC_CONTROL,
5043 rc6_mask |
5044 GEN6_RC_CTL_EI_MODE(1) |
5045 GEN6_RC_CTL_HW_ENABLE);
5046
dd75fdc8
CW
5047 /* Power down if completely idle for over 50ms */
5048 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
2b4e57bd 5049 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2b4e57bd 5050
42c0526c 5051 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
d060c169 5052 if (ret)
42c0526c 5053 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
d060c169
BW
5054
5055 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
5056 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
5057 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
b39fb297 5058 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
d060c169 5059 (pcu_mbox & 0xff) * 50);
b39fb297 5060 dev_priv->rps.max_freq = pcu_mbox & 0xff;
2b4e57bd
ED
5061 }
5062
dd75fdc8 5063 dev_priv->rps.power = HIGH_POWER; /* force a reset */
aed242ff 5064 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
2b4e57bd 5065
31643d54
BW
5066 rc6vids = 0;
5067 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5068 if (IS_GEN6(dev) && ret) {
5069 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5070 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5071 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5072 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5073 rc6vids &= 0xffff00;
5074 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5075 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5076 if (ret)
5077 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5078 }
5079
59bad947 5080 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2b4e57bd
ED
5081}
5082
c2bc2fc5 5083static void __gen6_update_ring_freq(struct drm_device *dev)
2b4e57bd 5084{
79f5b2c7 5085 struct drm_i915_private *dev_priv = dev->dev_private;
2b4e57bd 5086 int min_freq = 15;
3ebecd07
CW
5087 unsigned int gpu_freq;
5088 unsigned int max_ia_freq, min_ring_freq;
4c8c7743 5089 unsigned int max_gpu_freq, min_gpu_freq;
2b4e57bd 5090 int scaling_factor = 180;
eda79642 5091 struct cpufreq_policy *policy;
2b4e57bd 5092
4fc688ce 5093 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 5094
eda79642
BW
5095 policy = cpufreq_cpu_get(0);
5096 if (policy) {
5097 max_ia_freq = policy->cpuinfo.max_freq;
5098 cpufreq_cpu_put(policy);
5099 } else {
5100 /*
5101 * Default to measured freq if none found, PCU will ensure we
5102 * don't go over
5103 */
2b4e57bd 5104 max_ia_freq = tsc_khz;
eda79642 5105 }
2b4e57bd
ED
5106
5107 /* Convert from kHz to MHz */
5108 max_ia_freq /= 1000;
5109
153b4b95 5110 min_ring_freq = I915_READ(DCLK) & 0xf;
f6aca45c
BW
5111 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5112 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3ebecd07 5113
4c8c7743
AG
5114 if (IS_SKYLAKE(dev)) {
5115 /* Convert GT frequency to 50 MHz units */
5116 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5117 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5118 } else {
5119 min_gpu_freq = dev_priv->rps.min_freq;
5120 max_gpu_freq = dev_priv->rps.max_freq;
5121 }
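/*
 * Illustrative only, assuming GEN9_FREQ_SCALER is 3 (i.e. the SKL RPS
 * fields count in 16.67 MHz steps): a min_freq of 36 (~600 MHz) becomes
 * 36 / 3 = 12 in the 50 MHz units used by the table below (12 * 50 MHz
 * = 600 MHz).
 */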
5122
2b4e57bd
ED
5123 /*
5124 * For each potential GPU frequency, load a ring frequency we'd like
5125 * to use for memory access. We do this by specifying the IA frequency
5126 * the PCU should use as a reference to determine the ring frequency.
5127 */
4c8c7743
AG
5128 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5129 int diff = max_gpu_freq - gpu_freq;
3ebecd07
CW
5130 unsigned int ia_freq = 0, ring_freq = 0;
5131
4c8c7743
AG
5132 if (IS_SKYLAKE(dev)) {
5133 /*
5134 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5135 * No floor required for ring frequency on SKL.
5136 */
5137 ring_freq = gpu_freq;
5138 } else if (INTEL_INFO(dev)->gen >= 8) {
46c764d4
BW
5139 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5140 ring_freq = max(min_ring_freq, gpu_freq);
5141 } else if (IS_HASWELL(dev)) {
f6aca45c 5142 ring_freq = mult_frac(gpu_freq, 5, 4);
3ebecd07
CW
5143 ring_freq = max(min_ring_freq, ring_freq);
5144 /* leave ia_freq as the default, chosen by cpufreq */
5145 } else {
5146 /* On older processors, there is no separate ring
5147 * clock domain, so in order to boost the bandwidth
5148 * of the ring, we need to upclock the CPU (ia_freq).
5149 *
5150 * For GPU frequencies less than 750MHz,
5151 * just use the lowest ring freq.
5152 */
5153 if (gpu_freq < min_freq)
5154 ia_freq = 800;
5155 else
5156 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5157 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5158 }
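/*
 * Worked example for the legacy branch above (numbers illustrative, not
 * from any real platform): with max_ia_freq = 3400 MHz, diff = 4 and
 * scaling_factor = 180, ia_freq = 3400 - (4 * 180) / 2 = 3040, which
 * DIV_ROUND_CLOSEST(, 100) turns into 30, i.e. 3.0 GHz in the 100 MHz
 * steps the division implies.
 */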
2b4e57bd 5159
42c0526c
BW
5160 sandybridge_pcode_write(dev_priv,
5161 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3ebecd07
CW
5162 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5163 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5164 gpu_freq);
2b4e57bd 5165 }
2b4e57bd
ED
5166}
5167
c2bc2fc5
ID
5168void gen6_update_ring_freq(struct drm_device *dev)
5169{
5170 struct drm_i915_private *dev_priv = dev->dev_private;
5171
97d3308a 5172 if (!HAS_CORE_RING_FREQ(dev))
c2bc2fc5
ID
5173 return;
5174
5175 mutex_lock(&dev_priv->rps.hw_lock);
5176 __gen6_update_ring_freq(dev);
5177 mutex_unlock(&dev_priv->rps.hw_lock);
5178}
5179
03af2045 5180static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
2b6b3a09 5181{
095acd5f 5182 struct drm_device *dev = dev_priv->dev;
2b6b3a09
D
5183 u32 val, rp0;
5184
095acd5f
D
5185 if (dev->pdev->revision >= 0x20) {
5186 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
2b6b3a09 5187
095acd5f
D
5188 switch (INTEL_INFO(dev)->eu_total) {
5189 case 8:
5190 /* (2 * 4) config */
5191 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5192 break;
5193 case 12:
5194 /* (2 * 6) config */
5195 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5196 break;
5197 case 16:
5198 /* (2 * 8) config */
5199 default:
5200 /* Setting (2 * 8) Min RP0 for any other combination */
5201 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5202 break;
5203 }
5204 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5205 } else {
5206 /* For pre-production hardware */
5207 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
5208 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5209 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
5210 }
2b6b3a09
D
5211 return rp0;
5212}
5213
5214static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5215{
5216 u32 val, rpe;
5217
5218 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5219 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5220
5221 return rpe;
5222}
5223
7707df4a
D
5224static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5225{
095acd5f 5226 struct drm_device *dev = dev_priv->dev;
7707df4a
D
5227 u32 val, rp1;
5228
095acd5f
D
5229 if (dev->pdev->revision >= 0x20) {
5230 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5231 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5232 } else {
5233 /* For pre-production hardware */
5234 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5235 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5236 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
5237 }
7707df4a
D
5238 return rp1;
5239}
5240
f8f2b001
D
5241static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5242{
5243 u32 val, rp1;
5244
5245 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5246
5247 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5248
5249 return rp1;
5250}
5251
03af2045 5252static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
0a073b84
JB
5253{
5254 u32 val, rp0;
5255
64936258 5256 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
0a073b84
JB
5257
5258 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5259 /* Clamp to max */
5260 rp0 = min_t(u32, rp0, 0xea);
5261
5262 return rp0;
5263}
5264
5265static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5266{
5267 u32 val, rpe;
5268
64936258 5269 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
0a073b84 5270 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
64936258 5271 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
0a073b84
JB
5272 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5273
5274 return rpe;
5275}
5276
03af2045 5277static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
0a073b84 5278{
64936258 5279 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
0a073b84
JB
5280}
5281
ae48434c
ID
5282/* Check that the pctx buffer wasn't moved under us. */
5283static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5284{
5285 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
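/* The & ~4095 drops the low 12 bits of VLV_PCBR, recovering the
 * 4 KiB-aligned power context base address for the comparison below. */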
5286
5287 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5288 dev_priv->vlv_pctx->stolen->start);
5289}
5290
38807746
D
5291
5292/* Check that the pcbr address is not empty. */
5293static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5294{
5295 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5296
5297 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5298}
5299
5300static void cherryview_setup_pctx(struct drm_device *dev)
5301{
5302 struct drm_i915_private *dev_priv = dev->dev_private;
5303 unsigned long pctx_paddr, paddr;
5304 struct i915_gtt *gtt = &dev_priv->gtt;
5305 u32 pcbr;
5306 int pctx_size = 32*1024;
5307
5308 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5309
5310 pcbr = I915_READ(VLV_PCBR);
5311 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
ce611ef8 5312 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
38807746
D
5313 paddr = (dev_priv->mm.stolen_base +
5314 (gtt->stolen_size - pctx_size));
5315
5316 pctx_paddr = (paddr & (~4095));
5317 I915_WRITE(VLV_PCBR, pctx_paddr);
5318 }
ce611ef8
VS
5319
5320 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
38807746
D
5321}
5322
c9cddffc
JB
5323static void valleyview_setup_pctx(struct drm_device *dev)
5324{
5325 struct drm_i915_private *dev_priv = dev->dev_private;
5326 struct drm_i915_gem_object *pctx;
5327 unsigned long pctx_paddr;
5328 u32 pcbr;
5329 int pctx_size = 24*1024;
5330
17b0c1f7
ID
5331 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5332
c9cddffc
JB
5333 pcbr = I915_READ(VLV_PCBR);
5334 if (pcbr) {
5335 /* BIOS set it up already, grab the pre-alloc'd space */
5336 int pcbr_offset;
5337
5338 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5339 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5340 pcbr_offset,
190d6cd5 5341 I915_GTT_OFFSET_NONE,
c9cddffc
JB
5342 pctx_size);
5343 goto out;
5344 }
5345
ce611ef8
VS
5346 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5347
c9cddffc
JB
5348 /*
5349 * From the Gunit register HAS:
5350 * The Gfx driver is expected to program this register and ensure
5351 * proper allocation within Gfx stolen memory. For example, this
5352 * register should be programmed such that the PCBR range does not
5353 * overlap with other ranges, such as the frame buffer, protected
5354 * memory, or any other relevant ranges.
5355 */
5356 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5357 if (!pctx) {
5358 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5359 return;
5360 }
5361
5362 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5363 I915_WRITE(VLV_PCBR, pctx_paddr);
5364
5365out:
ce611ef8 5366 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
c9cddffc
JB
5367 dev_priv->vlv_pctx = pctx;
5368}
5369
ae48434c
ID
5370static void valleyview_cleanup_pctx(struct drm_device *dev)
5371{
5372 struct drm_i915_private *dev_priv = dev->dev_private;
5373
5374 if (WARN_ON(!dev_priv->vlv_pctx))
5375 return;
5376
5377 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
5378 dev_priv->vlv_pctx = NULL;
5379}
5380
4e80519e
ID
5381static void valleyview_init_gt_powersave(struct drm_device *dev)
5382{
5383 struct drm_i915_private *dev_priv = dev->dev_private;
2bb25c17 5384 u32 val;
4e80519e
ID
5385
5386 valleyview_setup_pctx(dev);
5387
5388 mutex_lock(&dev_priv->rps.hw_lock);
5389
2bb25c17
VS
5390 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5391 switch ((val >> 6) & 3) {
5392 case 0:
5393 case 1:
5394 dev_priv->mem_freq = 800;
5395 break;
5396 case 2:
5397 dev_priv->mem_freq = 1066;
5398 break;
5399 case 3:
5400 dev_priv->mem_freq = 1333;
5401 break;
5402 }
80b83b62 5403 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
2bb25c17 5404
4e80519e
ID
5405 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5406 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5407 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7c59a9c1 5408 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4e80519e
ID
5409 dev_priv->rps.max_freq);
5410
5411 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5412 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7c59a9c1 5413 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4e80519e
ID
5414 dev_priv->rps.efficient_freq);
5415
f8f2b001
D
5416 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5417 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7c59a9c1 5418 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
f8f2b001
D
5419 dev_priv->rps.rp1_freq);
5420
4e80519e
ID
5421 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5422 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7c59a9c1 5423 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4e80519e
ID
5424 dev_priv->rps.min_freq);
5425
aed242ff
CW
5426 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5427
4e80519e
ID
5428 /* Preserve min/max settings in case of re-init */
5429 if (dev_priv->rps.max_freq_softlimit == 0)
5430 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5431
5432 if (dev_priv->rps.min_freq_softlimit == 0)
5433 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5434
5435 mutex_unlock(&dev_priv->rps.hw_lock);
5436}
5437
38807746
D
5438static void cherryview_init_gt_powersave(struct drm_device *dev)
5439{
2b6b3a09 5440 struct drm_i915_private *dev_priv = dev->dev_private;
2bb25c17 5441 u32 val;
2b6b3a09 5442
38807746 5443 cherryview_setup_pctx(dev);
2b6b3a09
D
5444
5445 mutex_lock(&dev_priv->rps.hw_lock);
5446
a580516d 5447 mutex_lock(&dev_priv->sb_lock);
c6e8f39d 5448 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
a580516d 5449 mutex_unlock(&dev_priv->sb_lock);
c6e8f39d 5450
2bb25c17
VS
5451 switch ((val >> 2) & 0x7) {
5452 case 0:
5453 case 1:
5454 dev_priv->rps.cz_freq = 200;
5455 dev_priv->mem_freq = 1600;
5456 break;
5457 case 2:
5458 dev_priv->rps.cz_freq = 267;
5459 dev_priv->mem_freq = 1600;
5460 break;
5461 case 3:
5462 dev_priv->rps.cz_freq = 333;
5463 dev_priv->mem_freq = 2000;
5464 break;
5465 case 4:
5466 dev_priv->rps.cz_freq = 320;
5467 dev_priv->mem_freq = 1600;
5468 break;
5469 case 5:
5470 dev_priv->rps.cz_freq = 400;
5471 dev_priv->mem_freq = 1600;
5472 break;
5473 }
80b83b62 5474 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
2bb25c17 5475
2b6b3a09
D
5476 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5477 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5478 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7c59a9c1 5479 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
2b6b3a09
D
5480 dev_priv->rps.max_freq);
5481
5482 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5483 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7c59a9c1 5484 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2b6b3a09
D
5485 dev_priv->rps.efficient_freq);
5486
7707df4a
D
5487 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5488 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7c59a9c1 5489 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7707df4a
D
5490 dev_priv->rps.rp1_freq);
5491
5b7c91b7
D
5492 /* PUnit validated range is only [RPe, RP0] */
5493 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
2b6b3a09 5494 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7c59a9c1 5495 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2b6b3a09
D
5496 dev_priv->rps.min_freq);
5497
1c14762d
VS
5498 WARN_ONCE((dev_priv->rps.max_freq |
5499 dev_priv->rps.efficient_freq |
5500 dev_priv->rps.rp1_freq |
5501 dev_priv->rps.min_freq) & 1,
5502 "Odd GPU freq values\n");
5503
aed242ff
CW
5504 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5505
2b6b3a09
D
5506 /* Preserve min/max settings in case of re-init */
5507 if (dev_priv->rps.max_freq_softlimit == 0)
5508 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5509
5510 if (dev_priv->rps.min_freq_softlimit == 0)
5511 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5512
5513 mutex_unlock(&dev_priv->rps.hw_lock);
38807746
D
5514}
5515
4e80519e
ID
5516static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5517{
5518 valleyview_cleanup_pctx(dev);
5519}
5520
38807746
D
5521static void cherryview_enable_rps(struct drm_device *dev)
5522{
5523 struct drm_i915_private *dev_priv = dev->dev_private;
5524 struct intel_engine_cs *ring;
2b6b3a09 5525 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
38807746
D
5526 int i;
5527
5528 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5529
5530 gtfifodbg = I915_READ(GTFIFODBG);
5531 if (gtfifodbg) {
5532 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5533 gtfifodbg);
5534 I915_WRITE(GTFIFODBG, gtfifodbg);
5535 }
5536
5537 cherryview_check_pctx(dev_priv);
5538
5539 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5540 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
59bad947 5541 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
38807746 5542
160614a2
VS
5543 /* Disable RC states. */
5544 I915_WRITE(GEN6_RC_CONTROL, 0);
5545
38807746
D
5546 /* 2a: Program RC6 thresholds.*/
5547 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5548 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5549 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5550
5551 for_each_ring(ring, dev_priv, i)
5552 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5553 I915_WRITE(GEN6_RC_SLEEP, 0);
5554
f4f71c7d
D
5555 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
5556 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
38807746
D
5557
5558 /* allows RC6 residency counter to work */
5559 I915_WRITE(VLV_COUNTER_CONTROL,
5560 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5561 VLV_MEDIA_RC6_COUNT_EN |
5562 VLV_RENDER_RC6_COUNT_EN));
5563
5564 /* For now we assume BIOS is allocating and populating the PCBR */
5565 pcbr = I915_READ(VLV_PCBR);
5566
38807746
D
5567 /* 3: Enable RC6 */
5568 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5569 (pcbr >> VLV_PCBR_ADDR_SHIFT))
af5a75a3 5570 rc6_mode = GEN7_RC_CTL_TO_MODE;
38807746
D
5571
5572 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5573
2b6b3a09 5574 /* 4 Program defaults and thresholds for RPS*/
3cbdb48f 5575 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2b6b3a09
D
5576 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5577 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5578 I915_WRITE(GEN6_RP_UP_EI, 66000);
5579 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5580
5581 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5582
5583 /* 5: Enable RPS */
5584 I915_WRITE(GEN6_RP_CONTROL,
5585 GEN6_RP_MEDIA_HW_NORMAL_MODE |
eb973a5e 5586 GEN6_RP_MEDIA_IS_GFX |
2b6b3a09
D
5587 GEN6_RP_ENABLE |
5588 GEN6_RP_UP_BUSY_AVG |
5589 GEN6_RP_DOWN_IDLE_AVG);
5590
3ef62342
D
5591 /* Setting Fixed Bias */
5592 val = VLV_OVERRIDE_EN |
5593 VLV_SOC_TDP_EN |
5594 CHV_BIAS_CPU_50_SOC_50;
5595 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5596
2b6b3a09
D
5597 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5598
8d40c3ae
VS
5599 /* RPS code assumes GPLL is used */
5600 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5601
742f491d 5602 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
2b6b3a09
D
5603 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5604
5605 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5606 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
7c59a9c1 5607 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2b6b3a09
D
5608 dev_priv->rps.cur_freq);
5609
5610 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
7c59a9c1 5611 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2b6b3a09
D
5612 dev_priv->rps.efficient_freq);
5613
5614 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5615
59bad947 5616 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
38807746
D
5617}
5618
0a073b84
JB
5619static void valleyview_enable_rps(struct drm_device *dev)
5620{
5621 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 5622 struct intel_engine_cs *ring;
2a5913a8 5623 u32 gtfifodbg, val, rc6_mode = 0;
0a073b84
JB
5624 int i;
5625
5626 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5627
ae48434c
ID
5628 valleyview_check_pctx(dev_priv);
5629
0a073b84 5630 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
f7d85c1e
JB
5631 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5632 gtfifodbg);
0a073b84
JB
5633 I915_WRITE(GTFIFODBG, gtfifodbg);
5634 }
5635
c8d9a590 5636 /* If VLV, Forcewake all wells, else re-direct to regular path */
59bad947 5637 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
0a073b84 5638
160614a2
VS
5639 /* Disable RC states. */
5640 I915_WRITE(GEN6_RC_CONTROL, 0);
5641
cad725fe 5642 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
0a073b84
JB
5643 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5644 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5645 I915_WRITE(GEN6_RP_UP_EI, 66000);
5646 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5647
5648 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5649
5650 I915_WRITE(GEN6_RP_CONTROL,
5651 GEN6_RP_MEDIA_TURBO |
5652 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5653 GEN6_RP_MEDIA_IS_GFX |
5654 GEN6_RP_ENABLE |
5655 GEN6_RP_UP_BUSY_AVG |
5656 GEN6_RP_DOWN_IDLE_CONT);
5657
5658 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5659 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5660 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5661
5662 for_each_ring(ring, dev_priv, i)
5663 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5664
2f0aa304 5665 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
0a073b84
JB
5666
5667 /* allows RC6 residency counter to work */
49798eb2 5668 I915_WRITE(VLV_COUNTER_CONTROL,
31685c25
D
5669 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5670 VLV_RENDER_RC0_COUNT_EN |
49798eb2
JB
5671 VLV_MEDIA_RC6_COUNT_EN |
5672 VLV_RENDER_RC6_COUNT_EN));
31685c25 5673
a2b23fe0 5674 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
6b88f295 5675 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
dc39fff7
BW
5676
5677 intel_print_rc6_info(dev, rc6_mode);
5678
a2b23fe0 5679 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
0a073b84 5680
3ef62342
D
5681 /* Setting Fixed Bias */
5682 val = VLV_OVERRIDE_EN |
5683 VLV_SOC_TDP_EN |
5684 VLV_BIAS_CPU_125_SOC_875;
5685 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5686
64936258 5687 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
0a073b84 5688
8d40c3ae
VS
5689 /* RPS code assumes GPLL is used */
5690 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5691
742f491d 5692 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
0a073b84
JB
5693 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5694
b39fb297 5695 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
73008b98 5696 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
7c59a9c1 5697 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
b39fb297 5698 dev_priv->rps.cur_freq);
0a073b84 5699
73008b98 5700 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
7c59a9c1 5701 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
b39fb297 5702 dev_priv->rps.efficient_freq);
0a073b84 5703
b39fb297 5704 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
0a073b84 5705
59bad947 5706 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
0a073b84
JB
5707}
5708
dde18883
ED
5709static unsigned long intel_pxfreq(u32 vidfreq)
5710{
5711 unsigned long freq;
5712 int div = (vidfreq & 0x3f0000) >> 16;
5713 int post = (vidfreq & 0x3000) >> 12;
5714 int pre = (vidfreq & 0x7);
5715
5716 if (!pre)
5717 return 0;
5718
5719 freq = ((div * 133333) / ((1<<post) * pre));
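/* Purely illustrative arithmetic: div = 16, post = 1, pre = 2 gives
 * (16 * 133333) / ((1 << 1) * 2) = 2133328 / 4 = 533332, i.e. roughly
 * 533 MHz if the 133333 constant is read as a ~133.33 MHz reference in
 * kHz. */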
5720
5721 return freq;
5722}
5723
eb48eb00
DV
5724static const struct cparams {
5725 u16 i;
5726 u16 t;
5727 u16 m;
5728 u16 c;
5729} cparams[] = {
5730 { 1, 1333, 301, 28664 },
5731 { 1, 1066, 294, 24460 },
5732 { 1, 800, 294, 25192 },
5733 { 0, 1333, 276, 27605 },
5734 { 0, 1066, 276, 27605 },
5735 { 0, 800, 231, 23784 },
5736};
5737
f531dcb2 5738static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
5739{
5740 u64 total_count, diff, ret;
5741 u32 count1, count2, count3, m = 0, c = 0;
5742 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5743 int i;
5744
02d71956
DV
5745 assert_spin_locked(&mchdev_lock);
5746
20e4d407 5747 diff1 = now - dev_priv->ips.last_time1;
eb48eb00
DV
5748
5749 /* Prevent division-by-zero if we are asking too fast.
5750 * Also, we don't get interesting results if we are polling
5751 * faster than once in 10ms, so just return the saved value
5752 * in such cases.
5753 */
5754 if (diff1 <= 10)
20e4d407 5755 return dev_priv->ips.chipset_power;
eb48eb00
DV
5756
5757 count1 = I915_READ(DMIEC);
5758 count2 = I915_READ(DDREC);
5759 count3 = I915_READ(CSIEC);
5760
5761 total_count = count1 + count2 + count3;
5762
5763 /* FIXME: handle per-counter overflow */
20e4d407
DV
5764 if (total_count < dev_priv->ips.last_count1) {
5765 diff = ~0UL - dev_priv->ips.last_count1;
eb48eb00
DV
5766 diff += total_count;
5767 } else {
20e4d407 5768 diff = total_count - dev_priv->ips.last_count1;
eb48eb00
DV
5769 }
5770
5771 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
20e4d407
DV
5772 if (cparams[i].i == dev_priv->ips.c_m &&
5773 cparams[i].t == dev_priv->ips.r_t) {
eb48eb00
DV
5774 m = cparams[i].m;
5775 c = cparams[i].c;
5776 break;
5777 }
5778 }
5779
5780 diff = div_u64(diff, diff1);
5781 ret = ((m * diff) + c);
5782 ret = div_u64(ret, 10);
5783
20e4d407
DV
5784 dev_priv->ips.last_count1 = total_count;
5785 dev_priv->ips.last_time1 = now;
eb48eb00 5786
20e4d407 5787 dev_priv->ips.chipset_power = ret;
eb48eb00
DV
5788
5789 return ret;
5790}
5791
f531dcb2
CW
5792unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5793{
3d13ef2e 5794 struct drm_device *dev = dev_priv->dev;
f531dcb2
CW
5795 unsigned long val;
5796
3d13ef2e 5797 if (INTEL_INFO(dev)->gen != 5)
f531dcb2
CW
5798 return 0;
5799
5800 spin_lock_irq(&mchdev_lock);
5801
5802 val = __i915_chipset_val(dev_priv);
5803
5804 spin_unlock_irq(&mchdev_lock);
5805
5806 return val;
5807}
5808
eb48eb00
DV
5809unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5810{
5811 unsigned long m, x, b;
5812 u32 tsfs;
5813
5814 tsfs = I915_READ(TSFS);
5815
5816 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5817 x = I915_READ8(TR1);
5818
5819 b = tsfs & TSFS_INTR_MASK;
5820
5821 return ((m * x) / 127) - b;
5822}
5823
d972d6ee
MK
5824static int _pxvid_to_vd(u8 pxvid)
5825{
5826 if (pxvid == 0)
5827 return 0;
5828
5829 if (pxvid >= 8 && pxvid < 31)
5830 pxvid = 31;
5831
5832 return (pxvid + 2) * 125;
5833}
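/*
 * Example of the clamp above (values illustrative only): a pxvid of 16
 * lands in the 8..30 band and is treated as 31, so _pxvid_to_vd()
 * returns (31 + 2) * 125 = 4125; a pxvid of 5 is left alone and yields
 * (5 + 2) * 125 = 875.
 */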
5834
5835static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
eb48eb00 5836{
3d13ef2e 5837 struct drm_device *dev = dev_priv->dev;
d972d6ee
MK
5838 const int vd = _pxvid_to_vd(pxvid);
5839 const int vm = vd - 1125;
5840
3d13ef2e 5841 if (INTEL_INFO(dev)->is_mobile)
d972d6ee
MK
5842 return vm > 0 ? vm : 0;
5843
5844 return vd;
eb48eb00
DV
5845}
5846
02d71956 5847static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00 5848{
5ed0bdf2 5849 u64 now, diff, diffms;
eb48eb00
DV
5850 u32 count;
5851
02d71956 5852 assert_spin_locked(&mchdev_lock);
eb48eb00 5853
5ed0bdf2
TG
5854 now = ktime_get_raw_ns();
5855 diffms = now - dev_priv->ips.last_time2;
5856 do_div(diffms, NSEC_PER_MSEC);
eb48eb00
DV
5857
5858 /* Don't divide by 0 */
eb48eb00
DV
5859 if (!diffms)
5860 return;
5861
5862 count = I915_READ(GFXEC);
5863
20e4d407
DV
5864 if (count < dev_priv->ips.last_count2) {
5865 diff = ~0UL - dev_priv->ips.last_count2;
eb48eb00
DV
5866 diff += count;
5867 } else {
20e4d407 5868 diff = count - dev_priv->ips.last_count2;
eb48eb00
DV
5869 }
5870
20e4d407
DV
5871 dev_priv->ips.last_count2 = count;
5872 dev_priv->ips.last_time2 = now;
eb48eb00
DV
5873
5874 /* More magic constants... */
5875 diff = diff * 1181;
5876 diff = div_u64(diff, diffms * 10);
20e4d407 5877 dev_priv->ips.gfx_power = diff;
eb48eb00
DV
5878}
5879
02d71956
DV
5880void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5881{
3d13ef2e
DL
5882 struct drm_device *dev = dev_priv->dev;
5883
5884 if (INTEL_INFO(dev)->gen != 5)
02d71956
DV
5885 return;
5886
9270388e 5887 spin_lock_irq(&mchdev_lock);
02d71956
DV
5888
5889 __i915_update_gfx_val(dev_priv);
5890
9270388e 5891 spin_unlock_irq(&mchdev_lock);
02d71956
DV
5892}
5893
f531dcb2 5894static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
5895{
5896 unsigned long t, corr, state1, corr2, state2;
5897 u32 pxvid, ext_v;
5898
02d71956
DV
5899 assert_spin_locked(&mchdev_lock);
5900
b39fb297 5901 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
eb48eb00
DV
5902 pxvid = (pxvid >> 24) & 0x7f;
5903 ext_v = pvid_to_extvid(dev_priv, pxvid);
5904
5905 state1 = ext_v;
5906
5907 t = i915_mch_val(dev_priv);
5908
5909 /* Revel in the empirically derived constants */
5910
5911 /* Correction factor in 1/100000 units */
5912 if (t > 80)
5913 corr = ((t * 2349) + 135940);
5914 else if (t >= 50)
5915 corr = ((t * 964) + 29317);
5916 else /* < 50 */
5917 corr = ((t * 301) + 1004);
5918
5919 corr = corr * ((150142 * state1) / 10000 - 78642);
5920 corr /= 100000;
20e4d407 5921 corr2 = (corr * dev_priv->ips.corr);
eb48eb00
DV
5922
5923 state2 = (corr2 * state1) / 10000;
5924 state2 /= 100; /* convert to mW */
5925
02d71956 5926 __i915_update_gfx_val(dev_priv);
eb48eb00 5927
20e4d407 5928 return dev_priv->ips.gfx_power + state2;
eb48eb00
DV
5929}
5930
f531dcb2
CW
5931unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5932{
3d13ef2e 5933 struct drm_device *dev = dev_priv->dev;
f531dcb2
CW
5934 unsigned long val;
5935
3d13ef2e 5936 if (INTEL_INFO(dev)->gen != 5)
f531dcb2
CW
5937 return 0;
5938
5939 spin_lock_irq(&mchdev_lock);
5940
5941 val = __i915_gfx_val(dev_priv);
5942
5943 spin_unlock_irq(&mchdev_lock);
5944
5945 return val;
5946}
5947
eb48eb00
DV
5948/**
5949 * i915_read_mch_val - return value for IPS use
5950 *
5951 * Calculate and return a value for the IPS driver to use when deciding whether
5952 * we have thermal and power headroom to increase CPU or GPU power budget.
5953 */
5954unsigned long i915_read_mch_val(void)
5955{
5956 struct drm_i915_private *dev_priv;
5957 unsigned long chipset_val, graphics_val, ret = 0;
5958
9270388e 5959 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5960 if (!i915_mch_dev)
5961 goto out_unlock;
5962 dev_priv = i915_mch_dev;
5963
f531dcb2
CW
5964 chipset_val = __i915_chipset_val(dev_priv);
5965 graphics_val = __i915_gfx_val(dev_priv);
eb48eb00
DV
5966
5967 ret = chipset_val + graphics_val;
5968
5969out_unlock:
9270388e 5970 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5971
5972 return ret;
5973}
5974EXPORT_SYMBOL_GPL(i915_read_mch_val);
5975
5976/**
5977 * i915_gpu_raise - raise GPU frequency limit
5978 *
5979 * Raise the limit; IPS indicates we have thermal headroom.
5980 */
5981bool i915_gpu_raise(void)
5982{
5983 struct drm_i915_private *dev_priv;
5984 bool ret = true;
5985
9270388e 5986 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
5987 if (!i915_mch_dev) {
5988 ret = false;
5989 goto out_unlock;
5990 }
5991 dev_priv = i915_mch_dev;
5992
20e4d407
DV
5993 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5994 dev_priv->ips.max_delay--;
eb48eb00
DV
5995
5996out_unlock:
9270388e 5997 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
5998
5999 return ret;
6000}
6001EXPORT_SYMBOL_GPL(i915_gpu_raise);
6002
6003/**
6004 * i915_gpu_lower - lower GPU frequency limit
6005 *
6006 * IPS indicates we're close to a thermal limit, so throttle back the GPU
6007 * frequency maximum.
6008 */
6009bool i915_gpu_lower(void)
6010{
6011 struct drm_i915_private *dev_priv;
6012 bool ret = true;
6013
9270388e 6014 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6015 if (!i915_mch_dev) {
6016 ret = false;
6017 goto out_unlock;
6018 }
6019 dev_priv = i915_mch_dev;
6020
20e4d407
DV
6021 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6022 dev_priv->ips.max_delay++;
eb48eb00
DV
6023
6024out_unlock:
9270388e 6025 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6026
6027 return ret;
6028}
6029EXPORT_SYMBOL_GPL(i915_gpu_lower);
6030
6031/**
6032 * i915_gpu_busy - indicate GPU busyness to IPS
6033 *
6034 * Tell the IPS driver whether or not the GPU is busy.
6035 */
6036bool i915_gpu_busy(void)
6037{
6038 struct drm_i915_private *dev_priv;
a4872ba6 6039 struct intel_engine_cs *ring;
eb48eb00 6040 bool ret = false;
f047e395 6041 int i;
eb48eb00 6042
9270388e 6043 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6044 if (!i915_mch_dev)
6045 goto out_unlock;
6046 dev_priv = i915_mch_dev;
6047
f047e395
CW
6048 for_each_ring(ring, dev_priv, i)
6049 ret |= !list_empty(&ring->request_list);
eb48eb00
DV
6050
6051out_unlock:
9270388e 6052 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6053
6054 return ret;
6055}
6056EXPORT_SYMBOL_GPL(i915_gpu_busy);
6057
6058/**
6059 * i915_gpu_turbo_disable - disable graphics turbo
6060 *
6061 * Disable graphics turbo by resetting the max frequency and setting the
6062 * current frequency to the default.
6063 */
6064bool i915_gpu_turbo_disable(void)
6065{
6066 struct drm_i915_private *dev_priv;
6067 bool ret = true;
6068
9270388e 6069 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
6070 if (!i915_mch_dev) {
6071 ret = false;
6072 goto out_unlock;
6073 }
6074 dev_priv = i915_mch_dev;
6075
20e4d407 6076 dev_priv->ips.max_delay = dev_priv->ips.fstart;
eb48eb00 6077
20e4d407 6078 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
eb48eb00
DV
6079 ret = false;
6080
6081out_unlock:
9270388e 6082 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6083
6084 return ret;
6085}
6086EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6087
6088/**
6089 * Tells the intel_ips driver that the i915 driver is now loaded, if
6090 * IPS got loaded first.
6091 *
6092 * This awkward dance is so that neither module has to depend on the
6093 * other in order for IPS to do the appropriate communication of
6094 * GPU turbo limits to i915.
6095 */
6096static void
6097ips_ping_for_i915_load(void)
6098{
6099 void (*link)(void);
6100
6101 link = symbol_get(ips_link_to_i915_driver);
6102 if (link) {
6103 link();
6104 symbol_put(ips_link_to_i915_driver);
6105 }
6106}
6107
6108void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6109{
02d71956
DV
6110 /* We only register the i915 ips part with intel-ips once everything is
6111 * set up, to avoid intel-ips sneaking in and reading bogus values. */
9270388e 6112 spin_lock_irq(&mchdev_lock);
eb48eb00 6113 i915_mch_dev = dev_priv;
9270388e 6114 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
6115
6116 ips_ping_for_i915_load();
6117}
6118
6119void intel_gpu_ips_teardown(void)
6120{
9270388e 6121 spin_lock_irq(&mchdev_lock);
eb48eb00 6122 i915_mch_dev = NULL;
9270388e 6123 spin_unlock_irq(&mchdev_lock);
eb48eb00 6124}
76c3552f 6125
8090c6b9 6126static void intel_init_emon(struct drm_device *dev)
dde18883
ED
6127{
6128 struct drm_i915_private *dev_priv = dev->dev_private;
6129 u32 lcfuse;
6130 u8 pxw[16];
6131 int i;
6132
6133 /* Disable PMON while we program the energy weights */
6134 I915_WRITE(ECR, 0);
6135 POSTING_READ(ECR);
6136
6137 /* Program energy weights for various events */
6138 I915_WRITE(SDEW, 0x15040d00);
6139 I915_WRITE(CSIEW0, 0x007f0000);
6140 I915_WRITE(CSIEW1, 0x1e220004);
6141 I915_WRITE(CSIEW2, 0x04000004);
6142
6143 for (i = 0; i < 5; i++)
6144 I915_WRITE(PEW + (i * 4), 0);
6145 for (i = 0; i < 3; i++)
6146 I915_WRITE(DEW + (i * 4), 0);
6147
6148 /* Program P-state weights to account for frequency power adjustment */
6149 for (i = 0; i < 16; i++) {
6150 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
6151 unsigned long freq = intel_pxfreq(pxvidfreq);
6152 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6153 PXVFREQ_PX_SHIFT;
6154 unsigned long val;
6155
6156 val = vid * vid;
6157 val *= (freq / 1000);
6158 val *= 255;
6159 val /= (127*127*900);
6160 if (val > 0xff)
6161 DRM_ERROR("bad pxval: %ld\n", val);
6162 pxw[i] = val;
6163 }
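/*
 * Illustrative numbers only: if intel_pxfreq() returned 1000000 and the
 * decoded vid were 100, val = 100 * 100 * (1000000 / 1000) * 255 /
 * (127 * 127 * 900) = 2550000000 / 14516100 ~= 175, comfortably under
 * the 0xff limit checked above.
 */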
6164 /* Render standby states get 0 weight */
6165 pxw[14] = 0;
6166 pxw[15] = 0;
6167
6168 for (i = 0; i < 4; i++) {
6169 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6170 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6171 I915_WRITE(PXW + (i * 4), val);
6172 }
6173
6174 /* Adjust magic regs to magic values (more experimental results) */
6175 I915_WRITE(OGW0, 0);
6176 I915_WRITE(OGW1, 0);
6177 I915_WRITE(EG0, 0x00007f00);
6178 I915_WRITE(EG1, 0x0000000e);
6179 I915_WRITE(EG2, 0x000e0000);
6180 I915_WRITE(EG3, 0x68000300);
6181 I915_WRITE(EG4, 0x42000000);
6182 I915_WRITE(EG5, 0x00140031);
6183 I915_WRITE(EG6, 0);
6184 I915_WRITE(EG7, 0);
6185
6186 for (i = 0; i < 8; i++)
6187 I915_WRITE(PXWL + (i * 4), 0);
6188
6189 /* Enable PMON + select events */
6190 I915_WRITE(ECR, 0x80000019);
6191
6192 lcfuse = I915_READ(LCFUSE02);
6193
20e4d407 6194 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
dde18883
ED
6195}
6196
ae48434c
ID
6197void intel_init_gt_powersave(struct drm_device *dev)
6198{
e6069ca8
ID
6199 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
6200
38807746
D
6201 if (IS_CHERRYVIEW(dev))
6202 cherryview_init_gt_powersave(dev);
6203 else if (IS_VALLEYVIEW(dev))
4e80519e 6204 valleyview_init_gt_powersave(dev);
ae48434c
ID
6205}
6206
6207void intel_cleanup_gt_powersave(struct drm_device *dev)
6208{
38807746
D
6209 if (IS_CHERRYVIEW(dev))
6210 return;
6211 else if (IS_VALLEYVIEW(dev))
4e80519e 6212 valleyview_cleanup_gt_powersave(dev);
ae48434c
ID
6213}
6214
dbea3cea
ID
6215static void gen6_suspend_rps(struct drm_device *dev)
6216{
6217 struct drm_i915_private *dev_priv = dev->dev_private;
6218
6219 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6220
4c2a8897 6221 gen6_disable_rps_interrupts(dev);
dbea3cea
ID
6222}
6223
156c7ca0
JB
6224/**
6225 * intel_suspend_gt_powersave - suspend PM work and helper threads
6226 * @dev: drm device
6227 *
6228 * We don't want to disable RC6 or other features here, we just want
6229 * to make sure any work we've queued has finished and won't bother
6230 * us while we're suspended.
6231 */
6232void intel_suspend_gt_powersave(struct drm_device *dev)
6233{
6234 struct drm_i915_private *dev_priv = dev->dev_private;
6235
d4d70aa5
ID
6236 if (INTEL_INFO(dev)->gen < 6)
6237 return;
6238
dbea3cea 6239 gen6_suspend_rps(dev);
b47adc17
D
6240
6241 /* Force GPU to min freq during suspend */
6242 gen6_rps_idle(dev_priv);
156c7ca0
JB
6243}
6244
8090c6b9
DV
6245void intel_disable_gt_powersave(struct drm_device *dev)
6246{
1a01ab3b
JB
6247 struct drm_i915_private *dev_priv = dev->dev_private;
6248
930ebb46 6249 if (IS_IRONLAKE_M(dev)) {
8090c6b9 6250 ironlake_disable_drps(dev);
38807746 6251 } else if (INTEL_INFO(dev)->gen >= 6) {
10d8d366 6252 intel_suspend_gt_powersave(dev);
e494837a 6253
4fc688ce 6254 mutex_lock(&dev_priv->rps.hw_lock);
20e49366
ZW
6255 if (INTEL_INFO(dev)->gen >= 9)
6256 gen9_disable_rps(dev);
6257 else if (IS_CHERRYVIEW(dev))
38807746
D
6258 cherryview_disable_rps(dev);
6259 else if (IS_VALLEYVIEW(dev))
d20d4f0c
JB
6260 valleyview_disable_rps(dev);
6261 else
6262 gen6_disable_rps(dev);
e534770a 6263
c0951f0c 6264 dev_priv->rps.enabled = false;
4fc688ce 6265 mutex_unlock(&dev_priv->rps.hw_lock);
930ebb46 6266 }
8090c6b9
DV
6267}
6268
1a01ab3b
JB
6269static void intel_gen6_powersave_work(struct work_struct *work)
6270{
6271 struct drm_i915_private *dev_priv =
6272 container_of(work, struct drm_i915_private,
6273 rps.delayed_resume_work.work);
6274 struct drm_device *dev = dev_priv->dev;
6275
4fc688ce 6276 mutex_lock(&dev_priv->rps.hw_lock);
0a073b84 6277
4c2a8897 6278 gen6_reset_rps_interrupts(dev);
3cc134e3 6279
38807746
D
6280 if (IS_CHERRYVIEW(dev)) {
6281 cherryview_enable_rps(dev);
6282 } else if (IS_VALLEYVIEW(dev)) {
0a073b84 6283 valleyview_enable_rps(dev);
20e49366 6284 } else if (INTEL_INFO(dev)->gen >= 9) {
b6fef0ef 6285 gen9_enable_rc6(dev);
20e49366 6286 gen9_enable_rps(dev);
cc017fb4
AG
6287 if (IS_SKYLAKE(dev))
6288 __gen6_update_ring_freq(dev);
6edee7f3
BW
6289 } else if (IS_BROADWELL(dev)) {
6290 gen8_enable_rps(dev);
c2bc2fc5 6291 __gen6_update_ring_freq(dev);
0a073b84
JB
6292 } else {
6293 gen6_enable_rps(dev);
c2bc2fc5 6294 __gen6_update_ring_freq(dev);
0a073b84 6295 }
aed242ff
CW
6296
6297 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6298 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6299
6300 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6301 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6302
c0951f0c 6303 dev_priv->rps.enabled = true;
3cc134e3 6304
4c2a8897 6305 gen6_enable_rps_interrupts(dev);
3cc134e3 6306
4fc688ce 6307 mutex_unlock(&dev_priv->rps.hw_lock);
c6df39b5
ID
6308
6309 intel_runtime_pm_put(dev_priv);
1a01ab3b
JB
6310}
6311
8090c6b9
DV
6312void intel_enable_gt_powersave(struct drm_device *dev)
6313{
1a01ab3b
JB
6314 struct drm_i915_private *dev_priv = dev->dev_private;
6315
f61018b1
YZ
6316 /* Powersaving is controlled by the host when inside a VM */
6317 if (intel_vgpu_active(dev))
6318 return;
6319
8090c6b9 6320 if (IS_IRONLAKE_M(dev)) {
dc1d0136 6321 mutex_lock(&dev->struct_mutex);
8090c6b9 6322 ironlake_enable_drps(dev);
8090c6b9 6323 intel_init_emon(dev);
dc1d0136 6324 mutex_unlock(&dev->struct_mutex);
38807746 6325 } else if (INTEL_INFO(dev)->gen >= 6) {
1a01ab3b
JB
6326 /*
6327 * PCU communication is slow and this doesn't need to be
6328 * done at any specific time, so do this out of our fast path
6329 * to make resume and init faster.
c6df39b5
ID
6330 *
6331 * We depend on the HW RC6 power context save/restore
6332 * mechanism when entering D3 through runtime PM suspend. So
6333 * disable RPM until RPS/RC6 is properly setup. We can only
6334 * get here via the driver load/system resume/runtime resume
6335 * paths, so the _noresume version is enough (and in case of
6336 * runtime resume it's necessary).
1a01ab3b 6337 */
c6df39b5
ID
6338 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
6339 round_jiffies_up_relative(HZ)))
6340 intel_runtime_pm_get_noresume(dev_priv);
8090c6b9
DV
6341 }
6342}
6343
c6df39b5
ID
6344void intel_reset_gt_powersave(struct drm_device *dev)
6345{
6346 struct drm_i915_private *dev_priv = dev->dev_private;
6347
dbea3cea
ID
6348 if (INTEL_INFO(dev)->gen < 6)
6349 return;
6350
6351 gen6_suspend_rps(dev);
c6df39b5 6352 dev_priv->rps.enabled = false;
c6df39b5
ID
6353}
6354
3107bd48
DV
6355static void ibx_init_clock_gating(struct drm_device *dev)
6356{
6357 struct drm_i915_private *dev_priv = dev->dev_private;
6358
6359 /*
6360 * On Ibex Peak and Cougar Point, we need to disable clock
6361 * gating for the panel power sequencer or it will fail to
6362 * start up when no ports are active.
6363 */
6364 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6365}
6366
0e088b8f
VS
6367static void g4x_disable_trickle_feed(struct drm_device *dev)
6368{
6369 struct drm_i915_private *dev_priv = dev->dev_private;
b12ce1d8 6370 enum pipe pipe;
0e088b8f 6371
055e393f 6372 for_each_pipe(dev_priv, pipe) {
0e088b8f
VS
6373 I915_WRITE(DSPCNTR(pipe),
6374 I915_READ(DSPCNTR(pipe)) |
6375 DISPPLANE_TRICKLE_FEED_DISABLE);
b12ce1d8
VS
6376
6377 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6378 POSTING_READ(DSPSURF(pipe));
0e088b8f
VS
6379 }
6380}
6381
017636cc
VS
6382static void ilk_init_lp_watermarks(struct drm_device *dev)
6383{
6384 struct drm_i915_private *dev_priv = dev->dev_private;
6385
6386 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6387 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6388 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6389
6390 /*
6391 * Don't touch WM1S_LP_EN here.
6392 * Doing so could cause underruns.
6393 */
6394}
6395
1fa61106 6396static void ironlake_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6397{
6398 struct drm_i915_private *dev_priv = dev->dev_private;
231e54f6 6399 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6f1d69b0 6400
f1e8fa56
DL
6401 /*
6402 * Required for FBC
6403 * WaFbcDisableDpfcClockGating:ilk
6404 */
4d47e4f5
DL
6405 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6406 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6407 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6f1d69b0
ED
6408
6409 I915_WRITE(PCH_3DCGDIS0,
6410 MARIUNIT_CLOCK_GATE_DISABLE |
6411 SVSMUNIT_CLOCK_GATE_DISABLE);
6412 I915_WRITE(PCH_3DCGDIS1,
6413 VFMUNIT_CLOCK_GATE_DISABLE);
6414
6f1d69b0
ED
6415 /*
6416 * According to the spec the following bits should be set in
6417 * order to enable memory self-refresh
6418 * The bit 22/21 of 0x42004
6419 * The bit 5 of 0x42020
6420 * The bit 15 of 0x45000
6421 */
6422 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6423 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6424 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4d47e4f5 6425 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6f1d69b0
ED
6426 I915_WRITE(DISP_ARB_CTL,
6427 (I915_READ(DISP_ARB_CTL) |
6428 DISP_FBC_WM_DIS));
017636cc
VS
6429
6430 ilk_init_lp_watermarks(dev);
6f1d69b0
ED
6431
6432 /*
6433 * Based on the documentation from the hardware team, the following bits
6434 * should be set unconditionally in order to enable FBC.
6435 * The bit 22 of 0x42000
6436 * The bit 22 of 0x42004
6437 * The bit 7,8,9 of 0x42020.
6438 */
6439 if (IS_IRONLAKE_M(dev)) {
4bb35334 6440 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6f1d69b0
ED
6441 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6442 I915_READ(ILK_DISPLAY_CHICKEN1) |
6443 ILK_FBCQ_DIS);
6444 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6445 I915_READ(ILK_DISPLAY_CHICKEN2) |
6446 ILK_DPARB_GATE);
6f1d69b0
ED
6447 }
6448
4d47e4f5
DL
6449 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6450
6f1d69b0
ED
6451 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6452 I915_READ(ILK_DISPLAY_CHICKEN2) |
6453 ILK_ELPIN_409_SELECT);
6454 I915_WRITE(_3D_CHICKEN2,
6455 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6456 _3D_CHICKEN2_WM_READ_PIPELINED);
4358a374 6457
ecdb4eb7 6458 /* WaDisableRenderCachePipelinedFlush:ilk */
4358a374
DV
6459 I915_WRITE(CACHE_MODE_0,
6460 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3107bd48 6461
4e04632e
AG
6462 /* WaDisable_RenderCache_OperationalFlush:ilk */
6463 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6464
0e088b8f 6465 g4x_disable_trickle_feed(dev);
bdad2b2f 6466
3107bd48
DV
6467 ibx_init_clock_gating(dev);
6468}
6469
6470static void cpt_init_clock_gating(struct drm_device *dev)
6471{
6472 struct drm_i915_private *dev_priv = dev->dev_private;
6473 int pipe;
3f704fa2 6474 uint32_t val;
3107bd48
DV
6475
6476 /*
6477 * On Ibex Peak and Cougar Point, we need to disable clock
6478 * gating for the panel power sequencer or it will fail to
6479 * start up when no ports are active.
6480 */
cd664078
JB
6481 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6482 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6483 PCH_CPUNIT_CLOCK_GATE_DISABLE);
3107bd48
DV
6484 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6485 DPLS_EDP_PPS_FIX_DIS);
335c07b7
TI
6486 /* The below fixes a weird display corruption (a few pixels shifted
6487 * downward) seen only on the LVDS panels of some HP laptops with IVY.
6488 */
055e393f 6489 for_each_pipe(dev_priv, pipe) {
dc4bd2d1
PZ
6490 val = I915_READ(TRANS_CHICKEN2(pipe));
6491 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6492 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
41aa3448 6493 if (dev_priv->vbt.fdi_rx_polarity_inverted)
3f704fa2 6494 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
dc4bd2d1
PZ
6495 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6496 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6497 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
3f704fa2
PZ
6498 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6499 }
3107bd48 6500 /* WADP0ClockGatingDisable */
055e393f 6501 for_each_pipe(dev_priv, pipe) {
3107bd48
DV
6502 I915_WRITE(TRANS_CHICKEN1(pipe),
6503 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6504 }
6f1d69b0
ED
6505}
6506
1d7aaa0c
DV
6507static void gen6_check_mch_setup(struct drm_device *dev)
6508{
6509 struct drm_i915_private *dev_priv = dev->dev_private;
6510 uint32_t tmp;
6511
6512 tmp = I915_READ(MCH_SSKPD);
df662a28
DV
6513 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6514 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
6515 tmp);
1d7aaa0c
DV
6516}
6517
1fa61106 6518static void gen6_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6519{
6520 struct drm_i915_private *dev_priv = dev->dev_private;
231e54f6 6521 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6f1d69b0 6522
231e54f6 6523 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6f1d69b0
ED
6524
6525 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6526 I915_READ(ILK_DISPLAY_CHICKEN2) |
6527 ILK_ELPIN_409_SELECT);
6528
ecdb4eb7 6529 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4283908e
DV
6530 I915_WRITE(_3D_CHICKEN,
6531 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6532
4e04632e
AG
6533 /* WaDisable_RenderCache_OperationalFlush:snb */
6534 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6535
8d85d272
VS
6536 /*
6537 * BSpec recoomends 8x4 when MSAA is used,
6538 * however in practice 16x4 seems fastest.
c5c98a58
VS
6539 *
6540 * Note that PS/WM thread counts depend on the WIZ hashing
6541 * disable bit, which we don't touch here, but it's good
6542 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
8d85d272
VS
6543 */
6544 I915_WRITE(GEN6_GT_MODE,
98533251 6545 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
8d85d272 6546
017636cc 6547 ilk_init_lp_watermarks(dev);
6f1d69b0 6548
6f1d69b0 6549 I915_WRITE(CACHE_MODE_0,
50743298 6550 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6f1d69b0
ED
6551
6552 I915_WRITE(GEN6_UCGCTL1,
6553 I915_READ(GEN6_UCGCTL1) |
6554 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6555 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6556
6557 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6558 * gating disable must be set. Failure to set it results in
6559 * flickering pixels due to Z write ordering failures after
6560 * some amount of runtime in the Mesa "fire" demo, and Unigine
6561 * Sanctuary and Tropics, and apparently anything else with
6562 * alpha test or pixel discard.
6563 *
6564 * According to the spec, bit 11 (RCCUNIT) must also be set,
6565 * but we didn't debug actual testcases to find it out.
0f846f81 6566 *
ef59318c
VS
6567 * WaDisableRCCUnitClockGating:snb
6568 * WaDisableRCPBUnitClockGating:snb
6f1d69b0
ED
6569 */
6570 I915_WRITE(GEN6_UCGCTL2,
6571 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6572 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6573
5eb146dd 6574 /* WaStripsFansDisableFastClipPerformanceFix:snb */
743b57d8
VS
6575 I915_WRITE(_3D_CHICKEN3,
6576 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
6f1d69b0 6577
e927ecde
VS
6578 /*
6579 * Bspec says:
6580 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
6581 * 3DSTATE_SF number of SF output attributes is more than 16."
6582 */
6583 I915_WRITE(_3D_CHICKEN3,
6584 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6585
6f1d69b0
ED
6586 /*
6587 * According to the spec the following bits should be
6588 * set in order to enable memory self-refresh and fbc:
6589 * The bit21 and bit22 of 0x42000
6590 * The bit21 and bit22 of 0x42004
6591 * The bit5 and bit7 of 0x42020
6592 * The bit14 of 0x70180
6593 * The bit14 of 0x71180
4bb35334
DL
6594 *
6595 * WaFbcAsynchFlipDisableFbcQueue:snb
6f1d69b0
ED
6596 */
6597 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6598 I915_READ(ILK_DISPLAY_CHICKEN1) |
6599 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6600 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6601 I915_READ(ILK_DISPLAY_CHICKEN2) |
6602 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
231e54f6
DL
6603 I915_WRITE(ILK_DSPCLK_GATE_D,
6604 I915_READ(ILK_DSPCLK_GATE_D) |
6605 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6606 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6f1d69b0 6607
0e088b8f 6608 g4x_disable_trickle_feed(dev);
f8f2ac9a 6609
3107bd48 6610 cpt_init_clock_gating(dev);
1d7aaa0c
DV
6611
6612 gen6_check_mch_setup(dev);
6f1d69b0
ED
6613}
6614
6615static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6616{
6617 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
6618
3aad9059 6619 /*
46680e0a 6620 * WaVSThreadDispatchOverride:ivb,vlv
3aad9059
VS
6621 *
6622 * This actually overrides the dispatch
6623 * mode for all thread types.
6624 */
6f1d69b0
ED
6625 reg &= ~GEN7_FF_SCHED_MASK;
6626 reg |= GEN7_FF_TS_SCHED_HW;
6627 reg |= GEN7_FF_VS_SCHED_HW;
6628 reg |= GEN7_FF_DS_SCHED_HW;
6629
6630 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6631}
6632
17a303ec
PZ
6633static void lpt_init_clock_gating(struct drm_device *dev)
6634{
6635 struct drm_i915_private *dev_priv = dev->dev_private;
6636
6637 /*
6638 * TODO: this bit should only be enabled when really needed, then
6639 * disabled when not needed anymore in order to save power.
6640 */
c2699524 6641 if (HAS_PCH_LPT_LP(dev))
17a303ec
PZ
6642 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6643 I915_READ(SOUTH_DSPCLK_GATE_D) |
6644 PCH_LP_PARTITION_LEVEL_DISABLE);
0a790cdb
PZ
6645
6646 /* WADPOClockGatingDisable:hsw */
6647 I915_WRITE(_TRANSA_CHICKEN1,
6648 I915_READ(_TRANSA_CHICKEN1) |
6649 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
17a303ec
PZ
6650}
6651
7d708ee4
ID
6652static void lpt_suspend_hw(struct drm_device *dev)
6653{
6654 struct drm_i915_private *dev_priv = dev->dev_private;
6655
c2699524 6656 if (HAS_PCH_LPT_LP(dev)) {
7d708ee4
ID
6657 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6658
6659 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6660 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6661 }
6662}
6663
47c2bd97 6664static void broadwell_init_clock_gating(struct drm_device *dev)
1020a5c2
BW
6665{
6666 struct drm_i915_private *dev_priv = dev->dev_private;
07d27e20 6667 enum pipe pipe;
4d487cff 6668 uint32_t misccpctl;
1020a5c2 6669
7ad0dbab 6670 ilk_init_lp_watermarks(dev);
50ed5fbd 6671
ab57fff1 6672 /* WaSwitchSolVfFArbitrationPriority:bdw */
50ed5fbd 6673 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
fe4ab3ce 6674
ab57fff1 6675 /* WaPsrDPAMaskVBlankInSRD:bdw */
fe4ab3ce
BW
6676 I915_WRITE(CHICKEN_PAR1_1,
6677 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6678
ab57fff1 6679 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
055e393f 6680 for_each_pipe(dev_priv, pipe) {
07d27e20 6681 I915_WRITE(CHICKEN_PIPESL_1(pipe),
c7c65622 6682 I915_READ(CHICKEN_PIPESL_1(pipe)) |
8f670bb1 6683 BDW_DPRS_MASK_VBLANK_SRD);
fe4ab3ce 6684 }
63801f21 6685
ab57fff1
BW
6686 /* WaVSRefCountFullforceMissDisable:bdw */
6687 /* WaDSRefCountFullforceMissDisable:bdw */
6688 I915_WRITE(GEN7_FF_THREAD_MODE,
6689 I915_READ(GEN7_FF_THREAD_MODE) &
6690 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
36075a4c 6691
295e8bb7
VS
6692 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6693 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
4f1ca9e9
VS
6694
6695 /* WaDisableSDEUnitClockGating:bdw */
6696 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6697 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5d708680 6698
4d487cff
VS
6699 /*
6700 * WaProgramL3SqcReg1Default:bdw
6701 * WaTempDisableDOPClkGating:bdw
6702 */
6703 misccpctl = I915_READ(GEN7_MISCCPCTL);
6704 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6705 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6706 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6707
6d50b065
VS
6708 /*
6709 * WaGttCachingOffByDefault:bdw
6710 * GTT cache may not work with big pages, so if those
6711 * are ever enabled GTT cache may need to be disabled.
6712 */
6713 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6714
89d6b2b8 6715 lpt_init_clock_gating(dev);
1020a5c2
BW
6716}
6717
cad2a2d7
ED
6718static void haswell_init_clock_gating(struct drm_device *dev)
6719{
6720 struct drm_i915_private *dev_priv = dev->dev_private;
cad2a2d7 6721
017636cc 6722 ilk_init_lp_watermarks(dev);
cad2a2d7 6723
f3fc4884
FJ
6724 /* L3 caching of data atomics doesn't work -- disable it. */
6725 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6726 I915_WRITE(HSW_ROW_CHICKEN3,
6727 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6728
ecdb4eb7 6729 /* This is required by WaCatErrorRejectionIssue:hsw */
cad2a2d7
ED
6730 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6731 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6732 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6733
e36ea7ff
VS
6734 /* WaVSRefCountFullforceMissDisable:hsw */
6735 I915_WRITE(GEN7_FF_THREAD_MODE,
6736 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
cad2a2d7 6737
4e04632e
AG
6738 /* WaDisable_RenderCache_OperationalFlush:hsw */
6739 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6740
fe27c606
CW
6741 /* enable HiZ Raw Stall Optimization */
6742 I915_WRITE(CACHE_MODE_0_GEN7,
6743 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6744
ecdb4eb7 6745 /* WaDisable4x2SubspanOptimization:hsw */
cad2a2d7
ED
6746 I915_WRITE(CACHE_MODE_1,
6747 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
1544d9d5 6748
a12c4967
VS
6749 /*
6750 * BSpec recommends 8x4 when MSAA is used,
6751 * however in practice 16x4 seems fastest.
c5c98a58
VS
6752 *
6753 * Note that PS/WM thread counts depend on the WIZ hashing
6754 * disable bit, which we don't touch here, but it's good
6755 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
a12c4967
VS
6756 */
6757 I915_WRITE(GEN7_GT_MODE,
98533251 6758 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
a12c4967 6759
94411593
KG
6760 /* WaSampleCChickenBitEnable:hsw */
6761 I915_WRITE(HALF_SLICE_CHICKEN3,
6762 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6763
ecdb4eb7 6764 /* WaSwitchSolVfFArbitrationPriority:hsw */
e3dff585
BW
6765 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6766
90a88643
PZ
6767 /* WaRsPkgCStateDisplayPMReq:hsw */
6768 I915_WRITE(CHICKEN_PAR1_1,
6769 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
1544d9d5 6770
17a303ec 6771 lpt_init_clock_gating(dev);
cad2a2d7
ED
6772}
6773
1fa61106 6774static void ivybridge_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6775{
6776 struct drm_i915_private *dev_priv = dev->dev_private;
20848223 6777 uint32_t snpcr;
6f1d69b0 6778
017636cc 6779 ilk_init_lp_watermarks(dev);
6f1d69b0 6780
231e54f6 6781 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
6f1d69b0 6782
ecdb4eb7 6783 /* WaDisableEarlyCull:ivb */
87f8020e
JB
6784 I915_WRITE(_3D_CHICKEN3,
6785 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6786
ecdb4eb7 6787 /* WaDisableBackToBackFlipFix:ivb */
6f1d69b0
ED
6788 I915_WRITE(IVB_CHICKEN3,
6789 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6790 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6791
ecdb4eb7 6792 /* WaDisablePSDDualDispatchEnable:ivb */
12f3382b
JB
6793 if (IS_IVB_GT1(dev))
6794 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6795 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
12f3382b 6796
4e04632e
AG
6797 /* WaDisable_RenderCache_OperationalFlush:ivb */
6798 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6799
ecdb4eb7 6800 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
6f1d69b0
ED
6801 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
6802 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
6803
ecdb4eb7 6804 /* WaApplyL3ControlAndL3ChickenMode:ivb */
6f1d69b0
ED
6805 I915_WRITE(GEN7_L3CNTLREG1,
6806 GEN7_WA_FOR_GEN7_L3_CONTROL);
6807 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8ab43976
JB
6808 GEN7_WA_L3_CHICKEN_MODE);
6809 if (IS_IVB_GT1(dev))
6810 I915_WRITE(GEN7_ROW_CHICKEN2,
6811 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
412236c2
VS
6812 else {
6813 /* must write both registers */
6814 I915_WRITE(GEN7_ROW_CHICKEN2,
6815 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
8ab43976
JB
6816 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
6817 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
412236c2 6818 }
6f1d69b0 6819
ecdb4eb7 6820 /* WaForceL3Serialization:ivb */
61939d97
JB
6821 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6822 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6823
1b80a19a 6824 /*
0f846f81 6825 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
ecdb4eb7 6826 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
0f846f81
JB
6827 */
6828 I915_WRITE(GEN6_UCGCTL2,
28acf3b2 6829 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
0f846f81 6830
ecdb4eb7 6831 /* This is required by WaCatErrorRejectionIssue:ivb */
6f1d69b0
ED
6832 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6833 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6834 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6835
0e088b8f 6836 g4x_disable_trickle_feed(dev);
6f1d69b0
ED
6837
6838 gen7_setup_fixed_func_scheduler(dev_priv);
97e1930f 6839
22721343
CW
6840 if (0) { /* causes HiZ corruption on ivb:gt1 */
6841 /* enable HiZ Raw Stall Optimization */
6842 I915_WRITE(CACHE_MODE_0_GEN7,
6843 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6844 }
116f2b6d 6845
ecdb4eb7 6846 /* WaDisable4x2SubspanOptimization:ivb */
97e1930f
DV
6847 I915_WRITE(CACHE_MODE_1,
6848 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
20848223 6849
a607c1a4
VS
6850 /*
6851 * BSpec recommends 8x4 when MSAA is used,
6852 * however in practice 16x4 seems fastest.
c5c98a58
VS
6853 *
6854 * Note that PS/WM thread counts depend on the WIZ hashing
6855 * disable bit, which we don't touch here, but it's good
6856 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
a607c1a4
VS
6857 */
6858 I915_WRITE(GEN7_GT_MODE,
98533251 6859 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
a607c1a4 6860
20848223
BW
6861 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
6862 snpcr &= ~GEN6_MBC_SNPCR_MASK;
6863 snpcr |= GEN6_MBC_SNPCR_MED;
6864 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3107bd48 6865
ab5c608b
BW
6866 if (!HAS_PCH_NOP(dev))
6867 cpt_init_clock_gating(dev);
1d7aaa0c
DV
6868
6869 gen6_check_mch_setup(dev);
6f1d69b0
ED
6870}
6871
c6beb13e
VS
6872static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
6873{
6874 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
6875
6876 /*
6877 * Disable trickle feed and enable pnd deadline calculation
6878 */
6879 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
6880 I915_WRITE(CBR1_VLV, 0);
6881}
6882
1fa61106 6883static void valleyview_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6884{
6885 struct drm_i915_private *dev_priv = dev->dev_private;
6f1d69b0 6886
c6beb13e 6887 vlv_init_display_clock_gating(dev_priv);
6f1d69b0 6888
ecdb4eb7 6889 /* WaDisableEarlyCull:vlv */
87f8020e
JB
6890 I915_WRITE(_3D_CHICKEN3,
6891 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6892
ecdb4eb7 6893 /* WaDisableBackToBackFlipFix:vlv */
6f1d69b0
ED
6894 I915_WRITE(IVB_CHICKEN3,
6895 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6896 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6897
fad7d36e 6898 /* WaPsdDispatchEnable:vlv */
ecdb4eb7 6899 /* WaDisablePSDDualDispatchEnable:vlv */
12f3382b 6900 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
d3bc0303
JB
6901 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
6902 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
12f3382b 6903
4e04632e
AG
6904 /* WaDisable_RenderCache_OperationalFlush:vlv */
6905 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6906
ecdb4eb7 6907 /* WaForceL3Serialization:vlv */
61939d97
JB
6908 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6909 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6910
ecdb4eb7 6911 /* WaDisableDopClockGating:vlv */
8ab43976
JB
6912 I915_WRITE(GEN7_ROW_CHICKEN2,
6913 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6914
ecdb4eb7 6915 /* This is required by WaCatErrorRejectionIssue:vlv */
6f1d69b0
ED
6916 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6917 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6918 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6919
46680e0a
VS
6920 gen7_setup_fixed_func_scheduler(dev_priv);
6921
3c0edaeb 6922 /*
0f846f81 6923 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
ecdb4eb7 6924 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
0f846f81
JB
6925 */
6926 I915_WRITE(GEN6_UCGCTL2,
3c0edaeb 6927 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
0f846f81 6928
c98f5062
AG
6929 /* WaDisableL3Bank2xClockGate:vlv
 6930 * Disabling L3 clock gating - MMIO 940c[25] = 1
 6931 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
6932 I915_WRITE(GEN7_UCGCTL4,
6933 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
e3f33d46 6934
afd58e79
VS
6935 /*
6936 * BSpec says this must be set, even though
6937 * WaDisable4x2SubspanOptimization isn't listed for VLV.
6938 */
6b26c86d
DV
6939 I915_WRITE(CACHE_MODE_1,
6940 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7983117f 6941
da2518f9
VS
6942 /*
6943 * BSpec recommends 8x4 when MSAA is used,
6944 * however in practice 16x4 seems fastest.
6945 *
6946 * Note that PS/WM thread counts depend on the WIZ hashing
6947 * disable bit, which we don't touch here, but it's good
6948 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6949 */
6950 I915_WRITE(GEN7_GT_MODE,
6951 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6952
031994ee
VS
6953 /*
6954 * WaIncreaseL3CreditsForVLVB0:vlv
6955 * This is the hardware default actually.
6956 */
6957 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
6958
2d809570 6959 /*
ecdb4eb7 6960 * WaDisableVLVClockGating_VBIIssue:vlv
2d809570
JB
 6961 * Disable clock gating on the GCFG unit to prevent a delay
6962 * in the reporting of vblank events.
6963 */
7a0d1eed 6964 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
6f1d69b0
ED
6965}
6966
a4565da8
VS
6967static void cherryview_init_clock_gating(struct drm_device *dev)
6968{
6969 struct drm_i915_private *dev_priv = dev->dev_private;
6970
c6beb13e 6971 vlv_init_display_clock_gating(dev_priv);
dd811e70 6972
232ce337
VS
6973 /* WaVSRefCountFullforceMissDisable:chv */
6974 /* WaDSRefCountFullforceMissDisable:chv */
6975 I915_WRITE(GEN7_FF_THREAD_MODE,
6976 I915_READ(GEN7_FF_THREAD_MODE) &
6977 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
acea6f95
VS
6978
6979 /* WaDisableSemaphoreAndSyncFlipWait:chv */
6980 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6981 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
0846697c
VS
6982
6983 /* WaDisableCSUnitClockGating:chv */
6984 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6985 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
c631780f
VS
6986
6987 /* WaDisableSDEUnitClockGating:chv */
6988 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6989 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6d50b065
VS
6990
6991 /*
6992 * GTT cache may not work with big pages, so if those
6993 * are ever enabled GTT cache may need to be disabled.
6994 */
6995 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
a4565da8
VS
6996}
6997
1fa61106 6998static void g4x_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
6999{
7000 struct drm_i915_private *dev_priv = dev->dev_private;
7001 uint32_t dspclk_gate;
7002
7003 I915_WRITE(RENCLK_GATE_D1, 0);
7004 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7005 GS_UNIT_CLOCK_GATE_DISABLE |
7006 CL_UNIT_CLOCK_GATE_DISABLE);
7007 I915_WRITE(RAMCLK_GATE_D, 0);
7008 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7009 OVRUNIT_CLOCK_GATE_DISABLE |
7010 OVCUNIT_CLOCK_GATE_DISABLE;
7011 if (IS_GM45(dev))
7012 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7013 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
4358a374
DV
7014
7015 /* WaDisableRenderCachePipelinedFlush */
7016 I915_WRITE(CACHE_MODE_0,
7017 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
de1aa629 7018
4e04632e
AG
7019 /* WaDisable_RenderCache_OperationalFlush:g4x */
7020 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7021
0e088b8f 7022 g4x_disable_trickle_feed(dev);
6f1d69b0
ED
7023}
7024
1fa61106 7025static void crestline_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
7026{
7027 struct drm_i915_private *dev_priv = dev->dev_private;
7028
7029 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7030 I915_WRITE(RENCLK_GATE_D2, 0);
7031 I915_WRITE(DSPCLK_GATE_D, 0);
7032 I915_WRITE(RAMCLK_GATE_D, 0);
7033 I915_WRITE16(DEUC, 0);
20f94967
VS
7034 I915_WRITE(MI_ARB_STATE,
7035 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4e04632e
AG
7036
7037 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7038 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6f1d69b0
ED
7039}
7040
1fa61106 7041static void broadwater_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
7042{
7043 struct drm_i915_private *dev_priv = dev->dev_private;
7044
7045 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7046 I965_RCC_CLOCK_GATE_DISABLE |
7047 I965_RCPB_CLOCK_GATE_DISABLE |
7048 I965_ISC_CLOCK_GATE_DISABLE |
7049 I965_FBC_CLOCK_GATE_DISABLE);
7050 I915_WRITE(RENCLK_GATE_D2, 0);
20f94967
VS
7051 I915_WRITE(MI_ARB_STATE,
7052 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4e04632e
AG
7053
7054 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7055 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6f1d69b0
ED
7056}
7057
1fa61106 7058static void gen3_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
7059{
7060 struct drm_i915_private *dev_priv = dev->dev_private;
7061 u32 dstate = I915_READ(D_STATE);
7062
7063 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7064 DSTATE_DOT_CLOCK_GATING;
7065 I915_WRITE(D_STATE, dstate);
13a86b85
CW
7066
7067 if (IS_PINEVIEW(dev))
7068 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
974a3b0f
DV
7069
7070 /* IIR "flip pending" means done if this bit is set */
7071 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
12fabbcb
VS
7072
7073 /* interrupts should cause a wake up from C3 */
3299254f 7074 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
dbb42748
VS
7075
7076 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7077 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
1038392b
VS
7078
7079 I915_WRITE(MI_ARB_STATE,
7080 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6f1d69b0
ED
7081}
7082
1fa61106 7083static void i85x_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
7084{
7085 struct drm_i915_private *dev_priv = dev->dev_private;
7086
7087 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
54e472ae
VS
7088
7089 /* interrupts should cause a wake up from C3 */
7090 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7091 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
1038392b
VS
7092
7093 I915_WRITE(MEM_MODE,
7094 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
6f1d69b0
ED
7095}
7096
1fa61106 7097static void i830_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
7098{
7099 struct drm_i915_private *dev_priv = dev->dev_private;
7100
7101 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
1038392b
VS
7102
7103 I915_WRITE(MEM_MODE,
7104 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7105 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
6f1d69b0
ED
7106}
7107
6f1d69b0
ED
7108void intel_init_clock_gating(struct drm_device *dev)
7109{
7110 struct drm_i915_private *dev_priv = dev->dev_private;
7111
c57e3551
DL
7112 if (dev_priv->display.init_clock_gating)
7113 dev_priv->display.init_clock_gating(dev);
6f1d69b0
ED
7114}
7115
7d708ee4
ID
7116void intel_suspend_hw(struct drm_device *dev)
7117{
7118 if (HAS_PCH_LPT(dev))
7119 lpt_suspend_hw(dev);
7120}
7121
1fa61106
ED
7122/* Set up chip specific power management-related functions */
7123void intel_init_pm(struct drm_device *dev)
7124{
7125 struct drm_i915_private *dev_priv = dev->dev_private;
7126
7ff0ebcc 7127 intel_fbc_init(dev_priv);
1fa61106 7128
c921aba8
DV
7129 /* For cxsr */
7130 if (IS_PINEVIEW(dev))
7131 i915_pineview_get_mem_freq(dev);
7132 else if (IS_GEN5(dev))
7133 i915_ironlake_get_mem_freq(dev);
7134
1fa61106 7135 /* For FIFO watermark updates */
f5ed50cb 7136 if (INTEL_INFO(dev)->gen >= 9) {
2af30a5c
PB
7137 skl_setup_wm_latency(dev);
7138
a82abe43
ID
7139 if (IS_BROXTON(dev))
7140 dev_priv->display.init_clock_gating =
7141 bxt_init_clock_gating;
7142 else if (IS_SKYLAKE(dev))
7143 dev_priv->display.init_clock_gating =
7144 skl_init_clock_gating;
2d41c0b5
PB
7145 dev_priv->display.update_wm = skl_update_wm;
7146 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
c83155a6 7147 } else if (HAS_PCH_SPLIT(dev)) {
fa50ad61 7148 ilk_setup_wm_latency(dev);
53615a5e 7149
bd602544
VS
7150 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7151 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7152 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7153 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7154 dev_priv->display.update_wm = ilk_update_wm;
7155 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
7156 } else {
7157 DRM_DEBUG_KMS("Failed to read display plane latency. "
7158 "Disable CxSR\n");
7159 }
7160
7161 if (IS_GEN5(dev))
1fa61106 7162 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
bd602544 7163 else if (IS_GEN6(dev))
1fa61106 7164 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
bd602544 7165 else if (IS_IVYBRIDGE(dev))
1fa61106 7166 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
bd602544 7167 else if (IS_HASWELL(dev))
cad2a2d7 7168 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
bd602544 7169 else if (INTEL_INFO(dev)->gen == 8)
47c2bd97 7170 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
a4565da8 7171 } else if (IS_CHERRYVIEW(dev)) {
262cd2e1
VS
7172 vlv_setup_wm_latency(dev);
7173
7174 dev_priv->display.update_wm = vlv_update_wm;
a4565da8
VS
7175 dev_priv->display.init_clock_gating =
7176 cherryview_init_clock_gating;
1fa61106 7177 } else if (IS_VALLEYVIEW(dev)) {
26e1fe4f
VS
7178 vlv_setup_wm_latency(dev);
7179
7180 dev_priv->display.update_wm = vlv_update_wm;
1fa61106
ED
7181 dev_priv->display.init_clock_gating =
7182 valleyview_init_clock_gating;
1fa61106
ED
7183 } else if (IS_PINEVIEW(dev)) {
7184 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7185 dev_priv->is_ddr3,
7186 dev_priv->fsb_freq,
7187 dev_priv->mem_freq)) {
7188 DRM_INFO("failed to find known CxSR latency "
7189 "(found ddr%s fsb freq %d, mem freq %d), "
7190 "disabling CxSR\n",
7191 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7192 dev_priv->fsb_freq, dev_priv->mem_freq);
7193 /* Disable CxSR and never update its watermark again */
5209b1f4 7194 intel_set_memory_cxsr(dev_priv, false);
1fa61106
ED
7195 dev_priv->display.update_wm = NULL;
7196 } else
7197 dev_priv->display.update_wm = pineview_update_wm;
7198 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7199 } else if (IS_G4X(dev)) {
7200 dev_priv->display.update_wm = g4x_update_wm;
7201 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7202 } else if (IS_GEN4(dev)) {
7203 dev_priv->display.update_wm = i965_update_wm;
7204 if (IS_CRESTLINE(dev))
7205 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7206 else if (IS_BROADWATER(dev))
7207 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7208 } else if (IS_GEN3(dev)) {
7209 dev_priv->display.update_wm = i9xx_update_wm;
7210 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7211 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
feb56b93
DV
7212 } else if (IS_GEN2(dev)) {
7213 if (INTEL_INFO(dev)->num_pipes == 1) {
7214 dev_priv->display.update_wm = i845_update_wm;
1fa61106 7215 dev_priv->display.get_fifo_size = i845_get_fifo_size;
feb56b93
DV
7216 } else {
7217 dev_priv->display.update_wm = i9xx_update_wm;
1fa61106 7218 dev_priv->display.get_fifo_size = i830_get_fifo_size;
feb56b93
DV
7219 }
7220
7221 if (IS_I85X(dev) || IS_I865G(dev))
7222 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7223 else
7224 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7225 } else {
7226 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
1fa61106
ED
7227 }
7228}
7229
151a49d0 7230int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
42c0526c 7231{
4fc688ce 7232 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
42c0526c
BW
7233
7234 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7235 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7236 return -EAGAIN;
7237 }
7238
7239 I915_WRITE(GEN6_PCODE_DATA, *val);
dddab346 7240 I915_WRITE(GEN6_PCODE_DATA1, 0);
42c0526c
BW
7241 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7242
7243 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7244 500)) {
7245 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7246 return -ETIMEDOUT;
7247 }
7248
7249 *val = I915_READ(GEN6_PCODE_DATA);
7250 I915_WRITE(GEN6_PCODE_DATA, 0);
7251
7252 return 0;
7253}
7254
151a49d0 7255int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
42c0526c 7256{
4fc688ce 7257 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
42c0526c
BW
7258
7259 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7260 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7261 return -EAGAIN;
7262 }
7263
7264 I915_WRITE(GEN6_PCODE_DATA, val);
7265 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7266
7267 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7268 500)) {
7269 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7270 return -ETIMEDOUT;
7271 }
7272
7273 I915_WRITE(GEN6_PCODE_DATA, 0);
7274
7275 return 0;
7276}
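
/*
 * Editor's note (illustrative usage, not from intel_pm.c): both pcode helpers
 * above assert that rps.hw_lock is held, and the read variant uses *val both
 * to pass the request word in and to return the reply.  A hypothetical caller
 * might look like the sketch below; EXAMPLE_PCODE_MBOX_CMD stands in for a
 * real mailbox command such as those defined in i915_reg.h.
 */
static int example_pcode_query(struct drm_i915_private *dev_priv, u32 *reply)
{
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	*reply = 0;	/* request word, if the command takes one */
	ret = sandybridge_pcode_read(dev_priv, EXAMPLE_PCODE_MBOX_CMD, reply);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;	/* 0 on success, -EAGAIN or -ETIMEDOUT otherwise */
}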
a0e4e199 7277
dd06f88c 7278static int vlv_gpu_freq_div(unsigned int czclk_freq)
855ba3be 7279{
dd06f88c
VS
7280 switch (czclk_freq) {
7281 case 200:
7282 return 10;
7283 case 267:
7284 return 12;
7285 case 320:
7286 case 333:
dd06f88c 7287 return 16;
ab3fb157
VS
7288 case 400:
7289 return 20;
855ba3be
JB
7290 default:
7291 return -1;
7292 }
dd06f88c 7293}
855ba3be 7294
dd06f88c
VS
7295static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7296{
7297 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
7298
7299 div = vlv_gpu_freq_div(czclk_freq);
7300 if (div < 0)
7301 return div;
7302
7303 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
855ba3be
JB
7304}
7305
b55dd647 7306static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
855ba3be 7307{
dd06f88c 7308 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
855ba3be 7309
dd06f88c
VS
7310 mul = vlv_gpu_freq_div(czclk_freq);
7311 if (mul < 0)
7312 return mul;
855ba3be 7313
dd06f88c 7314 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
855ba3be
JB
7315}
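
/*
 * Editor's note (worked example with made-up numbers): with a hypothetical
 * VLV mem_freq of 1333 MHz, czclk_freq = DIV_ROUND_CLOSEST(1333, 4) = 333 and
 * vlv_gpu_freq_div(333) = 16, so a ratio opcode of 0xc0 converts to
 * DIV_ROUND_CLOSEST(333 * (0xc0 + 6 - 0xbd), 16) = DIV_ROUND_CLOSEST(2997, 16)
 * = 187 MHz, while byt_freq_opcode() applies the inverse mapping
 * (ratio = DIV_ROUND_CLOSEST(16 * MHz, 333) + 0xbd - 6), giving back 0xc0.
 */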
7316
b55dd647 7317static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
22b1b2f8 7318{
dd06f88c 7319 int div, czclk_freq = dev_priv->rps.cz_freq;
22b1b2f8 7320
dd06f88c
VS
7321 div = vlv_gpu_freq_div(czclk_freq) / 2;
7322 if (div < 0)
7323 return div;
22b1b2f8 7324
dd06f88c 7325 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
22b1b2f8
D
7326}
7327
b55dd647 7328static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
22b1b2f8 7329{
dd06f88c 7330 int mul, czclk_freq = dev_priv->rps.cz_freq;
22b1b2f8 7331
dd06f88c
VS
7332 mul = vlv_gpu_freq_div(czclk_freq) / 2;
7333 if (mul < 0)
7334 return mul;
22b1b2f8 7335
1c14762d 7336 /* CHV needs even values */
dd06f88c 7337 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
22b1b2f8
D
7338}
7339
616bc820 7340int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
22b1b2f8 7341{
80b6dda4
AG
7342 if (IS_GEN9(dev_priv->dev))
7343 return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
7344 else if (IS_CHERRYVIEW(dev_priv->dev))
616bc820 7345 return chv_gpu_freq(dev_priv, val);
22b1b2f8 7346 else if (IS_VALLEYVIEW(dev_priv->dev))
616bc820
VS
7347 return byt_gpu_freq(dev_priv, val);
7348 else
7349 return val * GT_FREQUENCY_MULTIPLIER;
22b1b2f8
D
7350}
7351
616bc820
VS
7352int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
7353{
80b6dda4
AG
7354 if (IS_GEN9(dev_priv->dev))
7355 return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
7356 else if (IS_CHERRYVIEW(dev_priv->dev))
616bc820 7357 return chv_freq_opcode(dev_priv, val);
22b1b2f8 7358 else if (IS_VALLEYVIEW(dev_priv->dev))
616bc820
VS
7359 return byt_freq_opcode(dev_priv, val);
7360 else
7361 return val / GT_FREQUENCY_MULTIPLIER;
7362}
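
/*
 * Editor's note: intel_gpu_freq()/intel_freq_opcode() are the single place
 * that translates between the hardware's frequency "ratio" units and MHz.
 * Assuming GT_FREQUENCY_MULTIPLIER is 50 (its long-standing value), a ratio
 * of 18 reads back as 18 * 50 = 900 MHz on SNB through BDW; gen9 additionally
 * divides by GEN9_FREQ_SCALER because its ratio step is finer (50/3 MHz),
 * and VLV/CHV go through the czclk-derived helpers above instead.
 */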
22b1b2f8 7363
6ad790c0
CW
7364struct request_boost {
7365 struct work_struct work;
eed29a5b 7366 struct drm_i915_gem_request *req;
6ad790c0
CW
7367};
7368
7369static void __intel_rps_boost_work(struct work_struct *work)
7370{
7371 struct request_boost *boost = container_of(work, struct request_boost, work);
e61b9958 7372 struct drm_i915_gem_request *req = boost->req;
6ad790c0 7373
e61b9958
CW
7374 if (!i915_gem_request_completed(req, true))
7375 gen6_rps_boost(to_i915(req->ring->dev), NULL,
7376 req->emitted_jiffies);
6ad790c0 7377
e61b9958 7378 i915_gem_request_unreference__unlocked(req);
6ad790c0
CW
7379 kfree(boost);
7380}
7381
7382void intel_queue_rps_boost_for_request(struct drm_device *dev,
eed29a5b 7383 struct drm_i915_gem_request *req)
6ad790c0
CW
7384{
7385 struct request_boost *boost;
7386
eed29a5b 7387 if (req == NULL || INTEL_INFO(dev)->gen < 6)
6ad790c0
CW
7388 return;
7389
e61b9958
CW
7390 if (i915_gem_request_completed(req, true))
7391 return;
7392
6ad790c0
CW
7393 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
7394 if (boost == NULL)
7395 return;
7396
eed29a5b
DV
7397 i915_gem_request_reference(req);
7398 boost->req = req;
6ad790c0
CW
7399
7400 INIT_WORK(&boost->work, __intel_rps_boost_work);
7401 queue_work(to_i915(dev)->wq, &boost->work);
7402}
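
/*
 * Editor's note (hypothetical call site, for illustration only): the helper
 * above allocates with GFP_ATOMIC and defers the actual gen6_rps_boost() to
 * the driver workqueue, so it can be called from contexts that cannot sleep.
 * A waiter would typically queue the boost just before blocking on the
 * request, roughly like this:
 */
static void example_wait_prepare(struct drm_device *dev,
				 struct drm_i915_gem_request *req)
{
	/* Ask RPS to clock up so the request we are about to wait on retires sooner. */
	intel_queue_rps_boost_for_request(dev, req);
}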
7403
f742a552 7404void intel_pm_setup(struct drm_device *dev)
907b28c5
CW
7405{
7406 struct drm_i915_private *dev_priv = dev->dev_private;
7407
f742a552 7408 mutex_init(&dev_priv->rps.hw_lock);
8d3afd7d 7409 spin_lock_init(&dev_priv->rps.client_lock);
f742a552 7410
907b28c5
CW
7411 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7412 intel_gen6_powersave_work);
1854d5ca 7413 INIT_LIST_HEAD(&dev_priv->rps.clients);
2e1b8730
CW
7414 INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
7415 INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
5d584b2e 7416
33688d95 7417 dev_priv->pm.suspended = false;
907b28c5 7418}