1 | /* | |
2 | * Copyright © 2012 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eugeni Dodonov <eugeni.dodonov@intel.com> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/cpufreq.h> | |
29 | #include "i915_drv.h" | |
30 | #include "intel_drv.h" | |
31 | #include "../../../platform/x86/intel_ips.h" | |
32 | #include <linux/module.h> | |
33 | ||
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, drawing as little as 0V in that state. The
 * GPU enters this stage automatically when idle if RC6 support is
 * enabled, and wakes up automatically as soon as a new workload arrives.
 *
 * Intel GPUs provide several RC6 modes, which differ in the latency
 * required to enter and leave RC6 and in the voltage the GPU consumes in
 * each state.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6 and
 * RC6pp is the deepest RC6. Hardware support varies with the GPU, BIOS,
 * chipset and platform. RC6 is usually the safest state and the one that
 * brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up from.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
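
/*
 * A minimal illustrative sketch: one way the flags above could be combined
 * into an "allowed RC6 states" mask, where requesting a deeper state also
 * implies the shallower ones. The helper name and its "deepest" argument
 * are illustrative assumptions, not driver API.
 */
static inline u32 intel_rc6_allowed_mask_sketch(int deepest)
{
	u32 mask = INTEL_RC6_ENABLE;		/* plain RC6 is the baseline */

	if (deepest >= 1)
		mask |= INTEL_RC6p_ENABLE;	/* deep RC6 */
	if (deepest >= 2)
		mask |= INTEL_RC6pp_ENABLE;	/* deepest RC6 */

	return mask;
}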
54 | ||
55 | static void gen9_init_clock_gating(struct drm_device *dev) | |
56 | { | |
57 | struct drm_i915_private *dev_priv = dev->dev_private; | |
58 | ||
59 | /* WaEnableLbsSlaRetryTimerDecrement:skl */ | |
60 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | |
61 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | |
62 | ||
63 | /* WaDisableKillLogic:bxt,skl */ | |
64 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | |
65 | ECOCHK_DIS_TLB); | |
66 | } | |
67 | ||
68 | static void skl_init_clock_gating(struct drm_device *dev) | |
69 | { | |
70 | struct drm_i915_private *dev_priv = dev->dev_private; | |
71 | ||
72 | gen9_init_clock_gating(dev); | |
73 | ||
74 | if (INTEL_REVID(dev) <= SKL_REVID_D0) { | |
75 | /* WaDisableHDCInvalidation:skl */ | |
76 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | |
77 | BDW_DISABLE_HDC_INVALIDATION); | |
78 | ||
79 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ | |
80 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | |
81 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); | |
82 | } | |
83 | ||
	/*
	 * GEN8_L3SQCREG4 has a dependency with the WA batch, so any new
	 * changes involving this register should also be added to the WA
	 * batch as required.
	 */
87 | if (INTEL_REVID(dev) <= SKL_REVID_E0) | |
88 | /* WaDisableLSQCROPERFforOCL:skl */ | |
89 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | |
90 | GEN8_LQSC_RO_PERF_DIS); | |
91 | ||
92 | /* WaEnableGapsTsvCreditFix:skl */ | |
93 | if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { | |
94 | I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | | |
95 | GEN9_GAPS_TSV_CREDIT_DISABLE)); | |
96 | } | |
97 | } | |
98 | ||
99 | static void bxt_init_clock_gating(struct drm_device *dev) | |
100 | { | |
101 | struct drm_i915_private *dev_priv = dev->dev_private; | |
102 | ||
103 | gen9_init_clock_gating(dev); | |
104 | ||
105 | /* WaDisableSDEUnitClockGating:bxt */ | |
106 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
107 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |
108 | ||
109 | /* | |
110 | * FIXME: | |
111 | * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. | |
112 | */ | |
113 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
114 | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); | |
115 | ||
116 | /* WaStoreMultiplePTEenable:bxt */ | |
	/* This is a requirement according to the hardware specification */
118 | if (INTEL_REVID(dev) == BXT_REVID_A0) | |
119 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); | |
120 | ||
121 | /* WaSetClckGatingDisableMedia:bxt */ | |
122 | if (INTEL_REVID(dev) == BXT_REVID_A0) { | |
123 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | |
124 | ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); | |
125 | } | |
126 | } | |
127 | ||
128 | static void i915_pineview_get_mem_freq(struct drm_device *dev) | |
129 | { | |
130 | struct drm_i915_private *dev_priv = dev->dev_private; | |
131 | u32 tmp; | |
132 | ||
133 | tmp = I915_READ(CLKCFG); | |
134 | ||
135 | switch (tmp & CLKCFG_FSB_MASK) { | |
136 | case CLKCFG_FSB_533: | |
137 | dev_priv->fsb_freq = 533; /* 133*4 */ | |
138 | break; | |
139 | case CLKCFG_FSB_800: | |
140 | dev_priv->fsb_freq = 800; /* 200*4 */ | |
141 | break; | |
142 | case CLKCFG_FSB_667: | |
143 | dev_priv->fsb_freq = 667; /* 167*4 */ | |
144 | break; | |
145 | case CLKCFG_FSB_400: | |
146 | dev_priv->fsb_freq = 400; /* 100*4 */ | |
147 | break; | |
148 | } | |
149 | ||
150 | switch (tmp & CLKCFG_MEM_MASK) { | |
151 | case CLKCFG_MEM_533: | |
152 | dev_priv->mem_freq = 533; | |
153 | break; | |
154 | case CLKCFG_MEM_667: | |
155 | dev_priv->mem_freq = 667; | |
156 | break; | |
157 | case CLKCFG_MEM_800: | |
158 | dev_priv->mem_freq = 800; | |
159 | break; | |
160 | } | |
161 | ||
162 | /* detect pineview DDR3 setting */ | |
163 | tmp = I915_READ(CSHRDDR3CTL); | |
164 | dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; | |
165 | } | |
166 | ||
167 | static void i915_ironlake_get_mem_freq(struct drm_device *dev) | |
168 | { | |
169 | struct drm_i915_private *dev_priv = dev->dev_private; | |
170 | u16 ddrpll, csipll; | |
171 | ||
172 | ddrpll = I915_READ16(DDRMPLL1); | |
173 | csipll = I915_READ16(CSIPLL0); | |
174 | ||
175 | switch (ddrpll & 0xff) { | |
176 | case 0xc: | |
177 | dev_priv->mem_freq = 800; | |
178 | break; | |
179 | case 0x10: | |
180 | dev_priv->mem_freq = 1066; | |
181 | break; | |
182 | case 0x14: | |
183 | dev_priv->mem_freq = 1333; | |
184 | break; | |
185 | case 0x18: | |
186 | dev_priv->mem_freq = 1600; | |
187 | break; | |
188 | default: | |
189 | DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", | |
190 | ddrpll & 0xff); | |
191 | dev_priv->mem_freq = 0; | |
192 | break; | |
193 | } | |
194 | ||
195 | dev_priv->ips.r_t = dev_priv->mem_freq; | |
196 | ||
197 | switch (csipll & 0x3ff) { | |
198 | case 0x00c: | |
199 | dev_priv->fsb_freq = 3200; | |
200 | break; | |
201 | case 0x00e: | |
202 | dev_priv->fsb_freq = 3733; | |
203 | break; | |
204 | case 0x010: | |
205 | dev_priv->fsb_freq = 4266; | |
206 | break; | |
207 | case 0x012: | |
208 | dev_priv->fsb_freq = 4800; | |
209 | break; | |
210 | case 0x014: | |
211 | dev_priv->fsb_freq = 5333; | |
212 | break; | |
213 | case 0x016: | |
214 | dev_priv->fsb_freq = 5866; | |
215 | break; | |
216 | case 0x018: | |
217 | dev_priv->fsb_freq = 6400; | |
218 | break; | |
219 | default: | |
220 | DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", | |
221 | csipll & 0x3ff); | |
222 | dev_priv->fsb_freq = 0; | |
223 | break; | |
224 | } | |
225 | ||
226 | if (dev_priv->fsb_freq == 3200) { | |
227 | dev_priv->ips.c_m = 0; | |
228 | } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { | |
229 | dev_priv->ips.c_m = 1; | |
230 | } else { | |
231 | dev_priv->ips.c_m = 2; | |
232 | } | |
233 | } | |
234 | ||
235 | static const struct cxsr_latency cxsr_latency_table[] = { | |
236 | {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ | |
237 | {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ | |
238 | {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ | |
239 | {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ | |
240 | {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ | |
241 | ||
242 | {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ | |
243 | {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ | |
244 | {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ | |
245 | {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ | |
246 | {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ | |
247 | ||
248 | {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ | |
249 | {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ | |
250 | {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ | |
251 | {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ | |
252 | {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ | |
253 | ||
254 | {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ | |
255 | {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ | |
256 | {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ | |
257 | {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ | |
258 | {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ | |
259 | ||
260 | {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ | |
261 | {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ | |
262 | {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ | |
263 | {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ | |
264 | {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ | |
265 | ||
266 | {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ | |
267 | {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ | |
268 | {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ | |
269 | {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ | |
270 | {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ | |
271 | }; | |
272 | ||
273 | static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, | |
274 | int is_ddr3, | |
275 | int fsb, | |
276 | int mem) | |
277 | { | |
278 | const struct cxsr_latency *latency; | |
279 | int i; | |
280 | ||
281 | if (fsb == 0 || mem == 0) | |
282 | return NULL; | |
283 | ||
284 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | |
285 | latency = &cxsr_latency_table[i]; | |
286 | if (is_desktop == latency->is_desktop && | |
287 | is_ddr3 == latency->is_ddr3 && | |
288 | fsb == latency->fsb_freq && mem == latency->mem_freq) | |
289 | return latency; | |
290 | } | |
291 | ||
292 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | |
293 | ||
294 | return NULL; | |
295 | } | |
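
/*
 * Illustrative usage (values assumed): a desktop Pineview part with DDR3,
 * a 667 MHz FSB and 800 MHz memory would match the {1, 1, 667, 800, ...}
 * row of cxsr_latency_table above, e.g.
 *
 *	latency = intel_get_cxsr_latency(1, 1, 667, 800);
 *
 * and the latencies in that row are what pineview_update_wm() later
 * programs into the DSPFW self-refresh watermark fields.
 */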
296 | ||
297 | static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) | |
298 | { | |
299 | u32 val; | |
300 | ||
301 | mutex_lock(&dev_priv->rps.hw_lock); | |
302 | ||
303 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | |
304 | if (enable) | |
305 | val &= ~FORCE_DDR_HIGH_FREQ; | |
306 | else | |
307 | val |= FORCE_DDR_HIGH_FREQ; | |
308 | val &= ~FORCE_DDR_LOW_FREQ; | |
309 | val |= FORCE_DDR_FREQ_REQ_ACK; | |
310 | vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); | |
311 | ||
312 | if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & | |
313 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) | |
314 | DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); | |
315 | ||
316 | mutex_unlock(&dev_priv->rps.hw_lock); | |
317 | } | |
318 | ||
319 | static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) | |
320 | { | |
321 | u32 val; | |
322 | ||
323 | mutex_lock(&dev_priv->rps.hw_lock); | |
324 | ||
325 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | |
326 | if (enable) | |
327 | val |= DSP_MAXFIFO_PM5_ENABLE; | |
328 | else | |
329 | val &= ~DSP_MAXFIFO_PM5_ENABLE; | |
330 | vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); | |
331 | ||
332 | mutex_unlock(&dev_priv->rps.hw_lock); | |
333 | } | |
334 | ||
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
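
/*
 * For example, FW_WM(wm, SR) expands to
 * (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark value
 * shifted into the SR field of a DSPFW register and masked to that field.
 */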
337 | ||
338 | void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |
339 | { | |
340 | struct drm_device *dev = dev_priv->dev; | |
341 | u32 val; | |
342 | ||
343 | if (IS_VALLEYVIEW(dev)) { | |
344 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); | |
345 | POSTING_READ(FW_BLC_SELF_VLV); | |
346 | dev_priv->wm.vlv.cxsr = enable; | |
347 | } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { | |
348 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); | |
349 | POSTING_READ(FW_BLC_SELF); | |
350 | } else if (IS_PINEVIEW(dev)) { | |
351 | val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; | |
352 | val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; | |
353 | I915_WRITE(DSPFW3, val); | |
354 | POSTING_READ(DSPFW3); | |
355 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | |
356 | val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : | |
357 | _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); | |
358 | I915_WRITE(FW_BLC_SELF, val); | |
359 | POSTING_READ(FW_BLC_SELF); | |
360 | } else if (IS_I915GM(dev)) { | |
361 | val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : | |
362 | _MASKED_BIT_DISABLE(INSTPM_SELF_EN); | |
363 | I915_WRITE(INSTPM, val); | |
364 | POSTING_READ(INSTPM); | |
365 | } else { | |
366 | return; | |
367 | } | |
368 | ||
369 | DRM_DEBUG_KMS("memory self-refresh is %s\n", | |
370 | enable ? "enabled" : "disabled"); | |
371 | } | |
372 | ||
373 | ||
/*
 * Latency for FIFO fetches depends on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
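
/*
 * Worked example (numbers assumed for illustration): with a 100 MHz pixel
 * clock and 4 bytes per pixel, 5 us of latency drains
 * (100000 kHz / 1000) * 4 * 5000 ns / 1000 = 2000 bytes from the FIFO,
 * i.e. roughly 32 entries at a 64-byte FIFO line size, which is what
 * intel_calculate_wm() below ends up subtracting from the FIFO size.
 */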
389 | ||
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
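
/*
 * For example, VLV_FIFO_START(dsparb, dsparb2, 8, 4) rebuilds a 9-bit FIFO
 * start point: bits 15:8 of DSPARB form the low 8 bits and bit 4 of
 * DSPARB2 supplies bit 8.
 */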
392 | ||
393 | static int vlv_get_fifo_size(struct drm_device *dev, | |
394 | enum pipe pipe, int plane) | |
395 | { | |
396 | struct drm_i915_private *dev_priv = dev->dev_private; | |
397 | int sprite0_start, sprite1_start, size; | |
398 | ||
399 | switch (pipe) { | |
400 | uint32_t dsparb, dsparb2, dsparb3; | |
401 | case PIPE_A: | |
402 | dsparb = I915_READ(DSPARB); | |
403 | dsparb2 = I915_READ(DSPARB2); | |
404 | sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); | |
405 | sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); | |
406 | break; | |
407 | case PIPE_B: | |
408 | dsparb = I915_READ(DSPARB); | |
409 | dsparb2 = I915_READ(DSPARB2); | |
410 | sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); | |
411 | sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); | |
412 | break; | |
413 | case PIPE_C: | |
414 | dsparb2 = I915_READ(DSPARB2); | |
415 | dsparb3 = I915_READ(DSPARB3); | |
416 | sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); | |
417 | sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); | |
418 | break; | |
419 | default: | |
420 | return 0; | |
421 | } | |
422 | ||
423 | switch (plane) { | |
424 | case 0: | |
425 | size = sprite0_start; | |
426 | break; | |
427 | case 1: | |
428 | size = sprite1_start - sprite0_start; | |
429 | break; | |
430 | case 2: | |
431 | size = 512 - 1 - sprite1_start; | |
432 | break; | |
433 | default: | |
434 | return 0; | |
435 | } | |
436 | ||
437 | DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n", | |
438 | pipe_name(pipe), plane == 0 ? "primary" : "sprite", | |
439 | plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1), | |
440 | size); | |
441 | ||
442 | return size; | |
443 | } | |
444 | ||
445 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |
446 | { | |
447 | struct drm_i915_private *dev_priv = dev->dev_private; | |
448 | uint32_t dsparb = I915_READ(DSPARB); | |
449 | int size; | |
450 | ||
451 | size = dsparb & 0x7f; | |
452 | if (plane) | |
453 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; | |
454 | ||
455 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
456 | plane ? "B" : "A", size); | |
457 | ||
458 | return size; | |
459 | } | |
460 | ||
461 | static int i830_get_fifo_size(struct drm_device *dev, int plane) | |
462 | { | |
463 | struct drm_i915_private *dev_priv = dev->dev_private; | |
464 | uint32_t dsparb = I915_READ(DSPARB); | |
465 | int size; | |
466 | ||
467 | size = dsparb & 0x1ff; | |
468 | if (plane) | |
469 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; | |
470 | size >>= 1; /* Convert to cachelines */ | |
471 | ||
472 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
473 | plane ? "B" : "A", size); | |
474 | ||
475 | return size; | |
476 | } | |
477 | ||
478 | static int i845_get_fifo_size(struct drm_device *dev, int plane) | |
479 | { | |
480 | struct drm_i915_private *dev_priv = dev->dev_private; | |
481 | uint32_t dsparb = I915_READ(DSPARB); | |
482 | int size; | |
483 | ||
484 | size = dsparb & 0x7f; | |
485 | size >>= 2; /* Convert to cachelines */ | |
486 | ||
487 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | |
488 | plane ? "B" : "A", | |
489 | size); | |
490 | ||
491 | return size; | |
492 | } | |
493 | ||
494 | /* Pineview has different values for various configs */ | |
495 | static const struct intel_watermark_params pineview_display_wm = { | |
496 | .fifo_size = PINEVIEW_DISPLAY_FIFO, | |
497 | .max_wm = PINEVIEW_MAX_WM, | |
498 | .default_wm = PINEVIEW_DFT_WM, | |
499 | .guard_size = PINEVIEW_GUARD_WM, | |
500 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, | |
501 | }; | |
502 | static const struct intel_watermark_params pineview_display_hplloff_wm = { | |
503 | .fifo_size = PINEVIEW_DISPLAY_FIFO, | |
504 | .max_wm = PINEVIEW_MAX_WM, | |
505 | .default_wm = PINEVIEW_DFT_HPLLOFF_WM, | |
506 | .guard_size = PINEVIEW_GUARD_WM, | |
507 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, | |
508 | }; | |
509 | static const struct intel_watermark_params pineview_cursor_wm = { | |
510 | .fifo_size = PINEVIEW_CURSOR_FIFO, | |
511 | .max_wm = PINEVIEW_CURSOR_MAX_WM, | |
512 | .default_wm = PINEVIEW_CURSOR_DFT_WM, | |
513 | .guard_size = PINEVIEW_CURSOR_GUARD_WM, | |
514 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, | |
515 | }; | |
516 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { | |
517 | .fifo_size = PINEVIEW_CURSOR_FIFO, | |
518 | .max_wm = PINEVIEW_CURSOR_MAX_WM, | |
519 | .default_wm = PINEVIEW_CURSOR_DFT_WM, | |
520 | .guard_size = PINEVIEW_CURSOR_GUARD_WM, | |
521 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, | |
522 | }; | |
523 | static const struct intel_watermark_params g4x_wm_info = { | |
524 | .fifo_size = G4X_FIFO_SIZE, | |
525 | .max_wm = G4X_MAX_WM, | |
526 | .default_wm = G4X_MAX_WM, | |
527 | .guard_size = 2, | |
528 | .cacheline_size = G4X_FIFO_LINE_SIZE, | |
529 | }; | |
530 | static const struct intel_watermark_params g4x_cursor_wm_info = { | |
531 | .fifo_size = I965_CURSOR_FIFO, | |
532 | .max_wm = I965_CURSOR_MAX_WM, | |
533 | .default_wm = I965_CURSOR_DFT_WM, | |
534 | .guard_size = 2, | |
535 | .cacheline_size = G4X_FIFO_LINE_SIZE, | |
536 | }; | |
537 | static const struct intel_watermark_params valleyview_wm_info = { | |
538 | .fifo_size = VALLEYVIEW_FIFO_SIZE, | |
539 | .max_wm = VALLEYVIEW_MAX_WM, | |
540 | .default_wm = VALLEYVIEW_MAX_WM, | |
541 | .guard_size = 2, | |
542 | .cacheline_size = G4X_FIFO_LINE_SIZE, | |
543 | }; | |
544 | static const struct intel_watermark_params valleyview_cursor_wm_info = { | |
545 | .fifo_size = I965_CURSOR_FIFO, | |
546 | .max_wm = VALLEYVIEW_CURSOR_MAX_WM, | |
547 | .default_wm = I965_CURSOR_DFT_WM, | |
548 | .guard_size = 2, | |
549 | .cacheline_size = G4X_FIFO_LINE_SIZE, | |
550 | }; | |
551 | static const struct intel_watermark_params i965_cursor_wm_info = { | |
552 | .fifo_size = I965_CURSOR_FIFO, | |
553 | .max_wm = I965_CURSOR_MAX_WM, | |
554 | .default_wm = I965_CURSOR_DFT_WM, | |
555 | .guard_size = 2, | |
556 | .cacheline_size = I915_FIFO_LINE_SIZE, | |
557 | }; | |
558 | static const struct intel_watermark_params i945_wm_info = { | |
559 | .fifo_size = I945_FIFO_SIZE, | |
560 | .max_wm = I915_MAX_WM, | |
561 | .default_wm = 1, | |
562 | .guard_size = 2, | |
563 | .cacheline_size = I915_FIFO_LINE_SIZE, | |
564 | }; | |
565 | static const struct intel_watermark_params i915_wm_info = { | |
566 | .fifo_size = I915_FIFO_SIZE, | |
567 | .max_wm = I915_MAX_WM, | |
568 | .default_wm = 1, | |
569 | .guard_size = 2, | |
570 | .cacheline_size = I915_FIFO_LINE_SIZE, | |
571 | }; | |
572 | static const struct intel_watermark_params i830_a_wm_info = { | |
573 | .fifo_size = I855GM_FIFO_SIZE, | |
574 | .max_wm = I915_MAX_WM, | |
575 | .default_wm = 1, | |
576 | .guard_size = 2, | |
577 | .cacheline_size = I830_FIFO_LINE_SIZE, | |
578 | }; | |
579 | static const struct intel_watermark_params i830_bc_wm_info = { | |
580 | .fifo_size = I855GM_FIFO_SIZE, | |
581 | .max_wm = I915_MAX_WM/2, | |
582 | .default_wm = 1, | |
583 | .guard_size = 2, | |
584 | .cacheline_size = I830_FIFO_LINE_SIZE, | |
585 | }; | |
586 | static const struct intel_watermark_params i845_wm_info = { | |
587 | .fifo_size = I830_FIFO_SIZE, | |
588 | .max_wm = I915_MAX_WM, | |
589 | .default_wm = 1, | |
590 | .guard_size = 2, | |
591 | .cacheline_size = I830_FIFO_LINE_SIZE, | |
592 | }; | |
593 | ||
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * Clocks go from a few thousand to several hundred thousand kHz;
	 * latency is usually a few thousand ns.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8, which is the burst size, since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
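
/*
 * A worked example of the calculation above (all numbers assumed): with a
 * 140 MHz pixel clock, 4 bytes per pixel, 5000 ns of latency and a 64-byte
 * cacheline, entries_required = (140 * 4 * 5000) / 1000 = 2800 bytes, or
 * DIV_ROUND_UP(2800, 64) = 44 FIFO entries.  With a 96-entry FIFO and a
 * guard size of 2 this gives a watermark of 96 - (44 + 2) = 50.
 */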
654 | ||
655 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) | |
656 | { | |
657 | struct drm_crtc *crtc, *enabled = NULL; | |
658 | ||
659 | for_each_crtc(dev, crtc) { | |
660 | if (intel_crtc_active(crtc)) { | |
661 | if (enabled) | |
662 | return NULL; | |
663 | enabled = crtc; | |
664 | } | |
665 | } | |
666 | ||
667 | return enabled; | |
668 | } | |
669 | ||
670 | static void pineview_update_wm(struct drm_crtc *unused_crtc) | |
671 | { | |
672 | struct drm_device *dev = unused_crtc->dev; | |
673 | struct drm_i915_private *dev_priv = dev->dev_private; | |
674 | struct drm_crtc *crtc; | |
675 | const struct cxsr_latency *latency; | |
676 | u32 reg; | |
677 | unsigned long wm; | |
678 | ||
679 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, | |
680 | dev_priv->fsb_freq, dev_priv->mem_freq); | |
681 | if (!latency) { | |
682 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | |
683 | intel_set_memory_cxsr(dev_priv, false); | |
684 | return; | |
685 | } | |
686 | ||
687 | crtc = single_enabled_crtc(dev); | |
688 | if (crtc) { | |
689 | const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
690 | int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; | |
691 | int clock = adjusted_mode->crtc_clock; | |
692 | ||
693 | /* Display SR */ | |
694 | wm = intel_calculate_wm(clock, &pineview_display_wm, | |
695 | pineview_display_wm.fifo_size, | |
696 | pixel_size, latency->display_sr); | |
697 | reg = I915_READ(DSPFW1); | |
698 | reg &= ~DSPFW_SR_MASK; | |
699 | reg |= FW_WM(wm, SR); | |
700 | I915_WRITE(DSPFW1, reg); | |
701 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | |
702 | ||
703 | /* cursor SR */ | |
704 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, | |
705 | pineview_display_wm.fifo_size, | |
706 | pixel_size, latency->cursor_sr); | |
707 | reg = I915_READ(DSPFW3); | |
708 | reg &= ~DSPFW_CURSOR_SR_MASK; | |
709 | reg |= FW_WM(wm, CURSOR_SR); | |
710 | I915_WRITE(DSPFW3, reg); | |
711 | ||
712 | /* Display HPLL off SR */ | |
713 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, | |
714 | pineview_display_hplloff_wm.fifo_size, | |
715 | pixel_size, latency->display_hpll_disable); | |
716 | reg = I915_READ(DSPFW3); | |
717 | reg &= ~DSPFW_HPLL_SR_MASK; | |
718 | reg |= FW_WM(wm, HPLL_SR); | |
719 | I915_WRITE(DSPFW3, reg); | |
720 | ||
721 | /* cursor HPLL off SR */ | |
722 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, | |
723 | pineview_display_hplloff_wm.fifo_size, | |
724 | pixel_size, latency->cursor_hpll_disable); | |
725 | reg = I915_READ(DSPFW3); | |
726 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | |
727 | reg |= FW_WM(wm, HPLL_CURSOR); | |
728 | I915_WRITE(DSPFW3, reg); | |
729 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | |
730 | ||
731 | intel_set_memory_cxsr(dev_priv, true); | |
732 | } else { | |
733 | intel_set_memory_cxsr(dev_priv, false); | |
734 | } | |
735 | } | |
736 | ||
737 | static bool g4x_compute_wm0(struct drm_device *dev, | |
738 | int plane, | |
739 | const struct intel_watermark_params *display, | |
740 | int display_latency_ns, | |
741 | const struct intel_watermark_params *cursor, | |
742 | int cursor_latency_ns, | |
743 | int *plane_wm, | |
744 | int *cursor_wm) | |
745 | { | |
746 | struct drm_crtc *crtc; | |
747 | const struct drm_display_mode *adjusted_mode; | |
748 | int htotal, hdisplay, clock, pixel_size; | |
749 | int line_time_us, line_count; | |
750 | int entries, tlb_miss; | |
751 | ||
752 | crtc = intel_get_crtc_for_plane(dev, plane); | |
753 | if (!intel_crtc_active(crtc)) { | |
754 | *cursor_wm = cursor->guard_size; | |
755 | *plane_wm = display->guard_size; | |
756 | return false; | |
757 | } | |
758 | ||
759 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
760 | clock = adjusted_mode->crtc_clock; | |
761 | htotal = adjusted_mode->crtc_htotal; | |
762 | hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; | |
763 | pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; | |
764 | ||
765 | /* Use the small buffer method to calculate plane watermark */ | |
766 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | |
767 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; | |
768 | if (tlb_miss > 0) | |
769 | entries += tlb_miss; | |
770 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | |
771 | *plane_wm = entries + display->guard_size; | |
772 | if (*plane_wm > (int)display->max_wm) | |
773 | *plane_wm = display->max_wm; | |
774 | ||
775 | /* Use the large buffer method to calculate cursor watermark */ | |
776 | line_time_us = max(htotal * 1000 / clock, 1); | |
777 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; | |
778 | entries = line_count * crtc->cursor->state->crtc_w * pixel_size; | |
779 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; | |
780 | if (tlb_miss > 0) | |
781 | entries += tlb_miss; | |
782 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | |
783 | *cursor_wm = entries + cursor->guard_size; | |
784 | if (*cursor_wm > (int)cursor->max_wm) | |
785 | *cursor_wm = (int)cursor->max_wm; | |
786 | ||
787 | return true; | |
788 | } | |
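
/*
 * Illustrative numbers for the "large buffer" cursor path above (all
 * values assumed): with htotal = 2200 and a 148.5 MHz clock,
 * line_time_us = 2200 * 1000 / 148500 = 14 us, so a 5000 ns latency gives
 * line_count = (5000 / 14 + 1000) / 1000 = 1 line.  A 64-pixel wide,
 * 4 byte-per-pixel cursor then needs 1 * 64 * 4 = 256 bytes, i.e.
 * DIV_ROUND_UP(256, 64) = 4 entries plus the guard size, ignoring the
 * TLB-miss adjustment.
 */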
789 | ||
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
797 | static bool g4x_check_srwm(struct drm_device *dev, | |
798 | int display_wm, int cursor_wm, | |
799 | const struct intel_watermark_params *display, | |
800 | const struct intel_watermark_params *cursor) | |
801 | { | |
802 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", | |
803 | display_wm, cursor_wm); | |
804 | ||
805 | if (display_wm > display->max_wm) { | |
806 | DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", | |
807 | display_wm, display->max_wm); | |
808 | return false; | |
809 | } | |
810 | ||
811 | if (cursor_wm > cursor->max_wm) { | |
812 | DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", | |
813 | cursor_wm, cursor->max_wm); | |
814 | return false; | |
815 | } | |
816 | ||
817 | if (!(display_wm || cursor_wm)) { | |
818 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); | |
819 | return false; | |
820 | } | |
821 | ||
822 | return true; | |
823 | } | |
824 | ||
825 | static bool g4x_compute_srwm(struct drm_device *dev, | |
826 | int plane, | |
827 | int latency_ns, | |
828 | const struct intel_watermark_params *display, | |
829 | const struct intel_watermark_params *cursor, | |
830 | int *display_wm, int *cursor_wm) | |
831 | { | |
832 | struct drm_crtc *crtc; | |
833 | const struct drm_display_mode *adjusted_mode; | |
834 | int hdisplay, htotal, pixel_size, clock; | |
835 | unsigned long line_time_us; | |
836 | int line_count, line_size; | |
837 | int small, large; | |
838 | int entries; | |
839 | ||
840 | if (!latency_ns) { | |
841 | *display_wm = *cursor_wm = 0; | |
842 | return false; | |
843 | } | |
844 | ||
845 | crtc = intel_get_crtc_for_plane(dev, plane); | |
846 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
847 | clock = adjusted_mode->crtc_clock; | |
848 | htotal = adjusted_mode->crtc_htotal; | |
849 | hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; | |
850 | pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; | |
851 | ||
852 | line_time_us = max(htotal * 1000 / clock, 1); | |
853 | line_count = (latency_ns / line_time_us + 1000) / 1000; | |
854 | line_size = hdisplay * pixel_size; | |
855 | ||
856 | /* Use the minimum of the small and large buffer method for primary */ | |
857 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | |
858 | large = line_count * line_size; | |
859 | ||
860 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | |
861 | *display_wm = entries + display->guard_size; | |
862 | ||
863 | /* calculate the self-refresh watermark for display cursor */ | |
864 | entries = line_count * pixel_size * crtc->cursor->state->crtc_w; | |
865 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | |
866 | *cursor_wm = entries + cursor->guard_size; | |
867 | ||
868 | return g4x_check_srwm(dev, | |
869 | *display_wm, *cursor_wm, | |
870 | display, cursor); | |
871 | } | |
872 | ||
873 | #define FW_WM_VLV(value, plane) \ | |
874 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) | |
875 | ||
876 | static void vlv_write_wm_values(struct intel_crtc *crtc, | |
877 | const struct vlv_wm_values *wm) | |
878 | { | |
879 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |
880 | enum pipe pipe = crtc->pipe; | |
881 | ||
882 | I915_WRITE(VLV_DDL(pipe), | |
883 | (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) | | |
884 | (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) | | |
885 | (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) | | |
886 | (wm->ddl[pipe].primary << DDL_PLANE_SHIFT)); | |
887 | ||
888 | I915_WRITE(DSPFW1, | |
889 | FW_WM(wm->sr.plane, SR) | | |
890 | FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) | | |
891 | FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) | | |
892 | FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA)); | |
893 | I915_WRITE(DSPFW2, | |
894 | FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) | | |
895 | FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) | | |
896 | FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA)); | |
897 | I915_WRITE(DSPFW3, | |
898 | FW_WM(wm->sr.cursor, CURSOR_SR)); | |
899 | ||
900 | if (IS_CHERRYVIEW(dev_priv)) { | |
901 | I915_WRITE(DSPFW7_CHV, | |
902 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | | |
903 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); | |
904 | I915_WRITE(DSPFW8_CHV, | |
905 | FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) | | |
906 | FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE)); | |
907 | I915_WRITE(DSPFW9_CHV, | |
908 | FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) | | |
909 | FW_WM(wm->pipe[PIPE_C].cursor, CURSORC)); | |
910 | I915_WRITE(DSPHOWM, | |
911 | FW_WM(wm->sr.plane >> 9, SR_HI) | | |
912 | FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) | | |
913 | FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) | | |
914 | FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) | | |
915 | FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | | |
916 | FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | | |
917 | FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | | |
918 | FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | | |
919 | FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | | |
920 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | |
921 | } else { | |
922 | I915_WRITE(DSPFW7, | |
923 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | | |
924 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); | |
925 | I915_WRITE(DSPHOWM, | |
926 | FW_WM(wm->sr.plane >> 9, SR_HI) | | |
927 | FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | | |
928 | FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | | |
929 | FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | | |
930 | FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | | |
931 | FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | | |
932 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | |
933 | } | |
934 | ||
935 | /* zero (unused) WM1 watermarks */ | |
936 | I915_WRITE(DSPFW4, 0); | |
937 | I915_WRITE(DSPFW5, 0); | |
938 | I915_WRITE(DSPFW6, 0); | |
939 | I915_WRITE(DSPHOWM1, 0); | |
940 | ||
941 | POSTING_READ(DSPFW1); | |
942 | } | |
943 | ||
944 | #undef FW_WM_VLV | |
945 | ||
946 | enum vlv_wm_level { | |
947 | VLV_WM_LEVEL_PM2, | |
948 | VLV_WM_LEVEL_PM5, | |
949 | VLV_WM_LEVEL_DDR_DVFS, | |
950 | }; | |
951 | ||
952 | /* latency must be in 0.1us units. */ | |
953 | static unsigned int vlv_wm_method2(unsigned int pixel_rate, | |
954 | unsigned int pipe_htotal, | |
955 | unsigned int horiz_pixels, | |
956 | unsigned int bytes_per_pixel, | |
957 | unsigned int latency) | |
958 | { | |
959 | unsigned int ret; | |
960 | ||
961 | ret = (latency * pixel_rate) / (pipe_htotal * 10000); | |
962 | ret = (ret + 1) * horiz_pixels * bytes_per_pixel; | |
963 | ret = DIV_ROUND_UP(ret, 64); | |
964 | ||
965 | return ret; | |
966 | } | |
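
/*
 * Worked example (values assumed): with pixel_rate = 148500 (kHz),
 * pipe_htotal = 2200 and latency = 30 (i.e. 3 us in 0.1us units),
 * (30 * 148500) / (2200 * 10000) = 0 whole lines, so (0 + 1) scanlines of
 * a 1920-pixel wide, 4 byte-per-pixel plane is 7680 bytes, i.e.
 * DIV_ROUND_UP(7680, 64) = 120 64-byte FIFO chunks.
 */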
967 | ||
968 | static void vlv_setup_wm_latency(struct drm_device *dev) | |
969 | { | |
970 | struct drm_i915_private *dev_priv = dev->dev_private; | |
971 | ||
972 | /* all latencies in usec */ | |
973 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; | |
974 | ||
975 | dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; | |
976 | ||
977 | if (IS_CHERRYVIEW(dev_priv)) { | |
978 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; | |
979 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; | |
980 | ||
981 | dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; | |
982 | } | |
983 | } | |
984 | ||
985 | static uint16_t vlv_compute_wm_level(struct intel_plane *plane, | |
986 | struct intel_crtc *crtc, | |
987 | const struct intel_plane_state *state, | |
988 | int level) | |
989 | { | |
990 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | |
991 | int clock, htotal, pixel_size, width, wm; | |
992 | ||
993 | if (dev_priv->wm.pri_latency[level] == 0) | |
994 | return USHRT_MAX; | |
995 | ||
996 | if (!state->visible) | |
997 | return 0; | |
998 | ||
999 | pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0); | |
1000 | clock = crtc->config->base.adjusted_mode.crtc_clock; | |
1001 | htotal = crtc->config->base.adjusted_mode.crtc_htotal; | |
1002 | width = crtc->config->pipe_src_w; | |
1003 | if (WARN_ON(htotal == 0)) | |
1004 | htotal = 1; | |
1005 | ||
1006 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { | |
1007 | /* | |
1008 | * FIXME the formula gives values that are | |
1009 | * too big for the cursor FIFO, and hence we | |
1010 | * would never be able to use cursors. For | |
1011 | * now just hardcode the watermark. | |
1012 | */ | |
1013 | wm = 63; | |
1014 | } else { | |
1015 | wm = vlv_wm_method2(clock, htotal, width, pixel_size, | |
1016 | dev_priv->wm.pri_latency[level] * 10); | |
1017 | } | |
1018 | ||
1019 | return min_t(int, wm, USHRT_MAX); | |
1020 | } | |
1021 | ||
1022 | static void vlv_compute_fifo(struct intel_crtc *crtc) | |
1023 | { | |
1024 | struct drm_device *dev = crtc->base.dev; | |
1025 | struct vlv_wm_state *wm_state = &crtc->wm_state; | |
1026 | struct intel_plane *plane; | |
1027 | unsigned int total_rate = 0; | |
1028 | const int fifo_size = 512 - 1; | |
1029 | int fifo_extra, fifo_left = fifo_size; | |
1030 | ||
1031 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | |
1032 | struct intel_plane_state *state = | |
1033 | to_intel_plane_state(plane->base.state); | |
1034 | ||
1035 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) | |
1036 | continue; | |
1037 | ||
1038 | if (state->visible) { | |
1039 | wm_state->num_active_planes++; | |
1040 | total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0); | |
1041 | } | |
1042 | } | |
1043 | ||
1044 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | |
1045 | struct intel_plane_state *state = | |
1046 | to_intel_plane_state(plane->base.state); | |
1047 | unsigned int rate; | |
1048 | ||
1049 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { | |
1050 | plane->wm.fifo_size = 63; | |
1051 | continue; | |
1052 | } | |
1053 | ||
1054 | if (!state->visible) { | |
1055 | plane->wm.fifo_size = 0; | |
1056 | continue; | |
1057 | } | |
1058 | ||
1059 | rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0); | |
1060 | plane->wm.fifo_size = fifo_size * rate / total_rate; | |
1061 | fifo_left -= plane->wm.fifo_size; | |
1062 | } | |
1063 | ||
1064 | fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1); | |
1065 | ||
1066 | /* spread the remainder evenly */ | |
1067 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | |
1068 | int plane_extra; | |
1069 | ||
1070 | if (fifo_left == 0) | |
1071 | break; | |
1072 | ||
1073 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) | |
1074 | continue; | |
1075 | ||
1076 | /* give it all to the first plane if none are active */ | |
1077 | if (plane->wm.fifo_size == 0 && | |
1078 | wm_state->num_active_planes) | |
1079 | continue; | |
1080 | ||
1081 | plane_extra = min(fifo_extra, fifo_left); | |
1082 | plane->wm.fifo_size += plane_extra; | |
1083 | fifo_left -= plane_extra; | |
1084 | } | |
1085 | ||
1086 | WARN_ON(fifo_left != 0); | |
1087 | } | |
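
/*
 * Illustrative split (values assumed): with a 4 byte-per-pixel primary and
 * one 2 byte-per-pixel sprite visible, total_rate = 6, so the primary gets
 * 511 * 4 / 6 = 340 entries and the sprite 511 * 2 / 6 = 170, leaving one
 * leftover entry which the loop above hands out as fifo_extra.
 */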
1088 | ||
1089 | static void vlv_invert_wms(struct intel_crtc *crtc) | |
1090 | { | |
1091 | struct vlv_wm_state *wm_state = &crtc->wm_state; | |
1092 | int level; | |
1093 | ||
1094 | for (level = 0; level < wm_state->num_levels; level++) { | |
1095 | struct drm_device *dev = crtc->base.dev; | |
1096 | const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; | |
1097 | struct intel_plane *plane; | |
1098 | ||
1099 | wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane; | |
1100 | wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor; | |
1101 | ||
1102 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | |
1103 | switch (plane->base.type) { | |
1104 | int sprite; | |
1105 | case DRM_PLANE_TYPE_CURSOR: | |
1106 | wm_state->wm[level].cursor = plane->wm.fifo_size - | |
1107 | wm_state->wm[level].cursor; | |
1108 | break; | |
1109 | case DRM_PLANE_TYPE_PRIMARY: | |
1110 | wm_state->wm[level].primary = plane->wm.fifo_size - | |
1111 | wm_state->wm[level].primary; | |
1112 | break; | |
1113 | case DRM_PLANE_TYPE_OVERLAY: | |
1114 | sprite = plane->plane; | |
1115 | wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size - | |
1116 | wm_state->wm[level].sprite[sprite]; | |
1117 | break; | |
1118 | } | |
1119 | } | |
1120 | } | |
1121 | } | |
1122 | ||
1123 | static void vlv_compute_wm(struct intel_crtc *crtc) | |
1124 | { | |
1125 | struct drm_device *dev = crtc->base.dev; | |
1126 | struct vlv_wm_state *wm_state = &crtc->wm_state; | |
1127 | struct intel_plane *plane; | |
1128 | int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; | |
1129 | int level; | |
1130 | ||
1131 | memset(wm_state, 0, sizeof(*wm_state)); | |
1132 | ||
1133 | wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed; | |
1134 | wm_state->num_levels = to_i915(dev)->wm.max_level + 1; | |
1135 | ||
1136 | wm_state->num_active_planes = 0; | |
1137 | ||
1138 | vlv_compute_fifo(crtc); | |
1139 | ||
1140 | if (wm_state->num_active_planes != 1) | |
1141 | wm_state->cxsr = false; | |
1142 | ||
1143 | if (wm_state->cxsr) { | |
1144 | for (level = 0; level < wm_state->num_levels; level++) { | |
1145 | wm_state->sr[level].plane = sr_fifo_size; | |
1146 | wm_state->sr[level].cursor = 63; | |
1147 | } | |
1148 | } | |
1149 | ||
1150 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | |
1151 | struct intel_plane_state *state = | |
1152 | to_intel_plane_state(plane->base.state); | |
1153 | ||
1154 | if (!state->visible) | |
1155 | continue; | |
1156 | ||
1157 | /* normal watermarks */ | |
1158 | for (level = 0; level < wm_state->num_levels; level++) { | |
1159 | int wm = vlv_compute_wm_level(plane, crtc, state, level); | |
1160 | int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511; | |
1161 | ||
1162 | /* hack */ | |
1163 | if (WARN_ON(level == 0 && wm > max_wm)) | |
1164 | wm = max_wm; | |
1165 | ||
1166 | if (wm > plane->wm.fifo_size) | |
1167 | break; | |
1168 | ||
1169 | switch (plane->base.type) { | |
1170 | int sprite; | |
1171 | case DRM_PLANE_TYPE_CURSOR: | |
1172 | wm_state->wm[level].cursor = wm; | |
1173 | break; | |
1174 | case DRM_PLANE_TYPE_PRIMARY: | |
1175 | wm_state->wm[level].primary = wm; | |
1176 | break; | |
1177 | case DRM_PLANE_TYPE_OVERLAY: | |
1178 | sprite = plane->plane; | |
1179 | wm_state->wm[level].sprite[sprite] = wm; | |
1180 | break; | |
1181 | } | |
1182 | } | |
1183 | ||
1184 | wm_state->num_levels = level; | |
1185 | ||
1186 | if (!wm_state->cxsr) | |
1187 | continue; | |
1188 | ||
1189 | /* maxfifo watermarks */ | |
1190 | switch (plane->base.type) { | |
1191 | int sprite, level; | |
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
1197 | case DRM_PLANE_TYPE_PRIMARY: | |
1198 | for (level = 0; level < wm_state->num_levels; level++) | |
1199 | wm_state->sr[level].plane = | |
1200 | min(wm_state->sr[level].plane, | |
1201 | wm_state->wm[level].primary); | |
1202 | break; | |
1203 | case DRM_PLANE_TYPE_OVERLAY: | |
1204 | sprite = plane->plane; | |
1205 | for (level = 0; level < wm_state->num_levels; level++) | |
1206 | wm_state->sr[level].plane = | |
1207 | min(wm_state->sr[level].plane, | |
1208 | wm_state->wm[level].sprite[sprite]); | |
1209 | break; | |
1210 | } | |
1211 | } | |
1212 | ||
1213 | /* clear any (partially) filled invalid levels */ | |
1214 | for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) { | |
1215 | memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level])); | |
1216 | memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level])); | |
1217 | } | |
1218 | ||
1219 | vlv_invert_wms(crtc); | |
1220 | } | |
1221 | ||
1222 | #define VLV_FIFO(plane, value) \ | |
1223 | (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) | |
1224 | ||
1225 | static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) | |
1226 | { | |
1227 | struct drm_device *dev = crtc->base.dev; | |
1228 | struct drm_i915_private *dev_priv = to_i915(dev); | |
1229 | struct intel_plane *plane; | |
1230 | int sprite0_start = 0, sprite1_start = 0, fifo_size = 0; | |
1231 | ||
1232 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | |
1233 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { | |
1234 | WARN_ON(plane->wm.fifo_size != 63); | |
1235 | continue; | |
1236 | } | |
1237 | ||
1238 | if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) | |
1239 | sprite0_start = plane->wm.fifo_size; | |
1240 | else if (plane->plane == 0) | |
1241 | sprite1_start = sprite0_start + plane->wm.fifo_size; | |
1242 | else | |
1243 | fifo_size = sprite1_start + plane->wm.fifo_size; | |
1244 | } | |
1245 | ||
1246 | WARN_ON(fifo_size != 512 - 1); | |
1247 | ||
1248 | DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n", | |
1249 | pipe_name(crtc->pipe), sprite0_start, | |
1250 | sprite1_start, fifo_size); | |
1251 | ||
1252 | switch (crtc->pipe) { | |
1253 | uint32_t dsparb, dsparb2, dsparb3; | |
1254 | case PIPE_A: | |
1255 | dsparb = I915_READ(DSPARB); | |
1256 | dsparb2 = I915_READ(DSPARB2); | |
1257 | ||
1258 | dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | | |
1259 | VLV_FIFO(SPRITEB, 0xff)); | |
1260 | dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | | |
1261 | VLV_FIFO(SPRITEB, sprite1_start)); | |
1262 | ||
1263 | dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | | |
1264 | VLV_FIFO(SPRITEB_HI, 0x1)); | |
1265 | dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | | |
1266 | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); | |
1267 | ||
1268 | I915_WRITE(DSPARB, dsparb); | |
1269 | I915_WRITE(DSPARB2, dsparb2); | |
1270 | break; | |
1271 | case PIPE_B: | |
1272 | dsparb = I915_READ(DSPARB); | |
1273 | dsparb2 = I915_READ(DSPARB2); | |
1274 | ||
1275 | dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | | |
1276 | VLV_FIFO(SPRITED, 0xff)); | |
1277 | dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | | |
1278 | VLV_FIFO(SPRITED, sprite1_start)); | |
1279 | ||
1280 | dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | | |
1281 | VLV_FIFO(SPRITED_HI, 0xff)); | |
1282 | dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | | |
1283 | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); | |
1284 | ||
1285 | I915_WRITE(DSPARB, dsparb); | |
1286 | I915_WRITE(DSPARB2, dsparb2); | |
1287 | break; | |
1288 | case PIPE_C: | |
1289 | dsparb3 = I915_READ(DSPARB3); | |
1290 | dsparb2 = I915_READ(DSPARB2); | |
1291 | ||
1292 | dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | | |
1293 | VLV_FIFO(SPRITEF, 0xff)); | |
1294 | dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | | |
1295 | VLV_FIFO(SPRITEF, sprite1_start)); | |
1296 | ||
1297 | dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | | |
1298 | VLV_FIFO(SPRITEF_HI, 0xff)); | |
1299 | dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | | |
1300 | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); | |
1301 | ||
1302 | I915_WRITE(DSPARB3, dsparb3); | |
1303 | I915_WRITE(DSPARB2, dsparb2); | |
1304 | break; | |
1305 | default: | |
1306 | break; | |
1307 | } | |
1308 | } | |
1309 | ||
1310 | #undef VLV_FIFO | |
1311 | ||
1312 | static void vlv_merge_wm(struct drm_device *dev, | |
1313 | struct vlv_wm_values *wm) | |
1314 | { | |
1315 | struct intel_crtc *crtc; | |
1316 | int num_active_crtcs = 0; | |
1317 | ||
1318 | wm->level = to_i915(dev)->wm.max_level; | |
1319 | wm->cxsr = true; | |
1320 | ||
1321 | for_each_intel_crtc(dev, crtc) { | |
1322 | const struct vlv_wm_state *wm_state = &crtc->wm_state; | |
1323 | ||
1324 | if (!crtc->active) | |
1325 | continue; | |
1326 | ||
1327 | if (!wm_state->cxsr) | |
1328 | wm->cxsr = false; | |
1329 | ||
1330 | num_active_crtcs++; | |
1331 | wm->level = min_t(int, wm->level, wm_state->num_levels - 1); | |
1332 | } | |
1333 | ||
1334 | if (num_active_crtcs != 1) | |
1335 | wm->cxsr = false; | |
1336 | ||
1337 | if (num_active_crtcs > 1) | |
1338 | wm->level = VLV_WM_LEVEL_PM2; | |
1339 | ||
1340 | for_each_intel_crtc(dev, crtc) { | |
1341 | struct vlv_wm_state *wm_state = &crtc->wm_state; | |
1342 | enum pipe pipe = crtc->pipe; | |
1343 | ||
1344 | if (!crtc->active) | |
1345 | continue; | |
1346 | ||
1347 | wm->pipe[pipe] = wm_state->wm[wm->level]; | |
1348 | if (wm->cxsr) | |
1349 | wm->sr = wm_state->sr[wm->level]; | |
1350 | ||
1351 | wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2; | |
1352 | wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2; | |
1353 | wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2; | |
1354 | wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2; | |
1355 | } | |
1356 | } | |
1357 | ||
1358 | static void vlv_update_wm(struct drm_crtc *crtc) | |
1359 | { | |
1360 | struct drm_device *dev = crtc->dev; | |
1361 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1362 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
1363 | enum pipe pipe = intel_crtc->pipe; | |
1364 | struct vlv_wm_values wm = {}; | |
1365 | ||
1366 | vlv_compute_wm(intel_crtc); | |
1367 | vlv_merge_wm(dev, &wm); | |
1368 | ||
1369 | if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) { | |
1370 | /* FIXME should be part of crtc atomic commit */ | |
1371 | vlv_pipe_set_fifo_size(intel_crtc); | |
1372 | return; | |
1373 | } | |
1374 | ||
1375 | if (wm.level < VLV_WM_LEVEL_DDR_DVFS && | |
1376 | dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS) | |
1377 | chv_set_memory_dvfs(dev_priv, false); | |
1378 | ||
1379 | if (wm.level < VLV_WM_LEVEL_PM5 && | |
1380 | dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5) | |
1381 | chv_set_memory_pm5(dev_priv, false); | |
1382 | ||
1383 | if (!wm.cxsr && dev_priv->wm.vlv.cxsr) | |
1384 | intel_set_memory_cxsr(dev_priv, false); | |
1385 | ||
1386 | /* FIXME should be part of crtc atomic commit */ | |
1387 | vlv_pipe_set_fifo_size(intel_crtc); | |
1388 | ||
1389 | vlv_write_wm_values(intel_crtc, &wm); | |
1390 | ||
1391 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " | |
1392 | "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", | |
1393 | pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor, | |
1394 | wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1], | |
1395 | wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); | |
1396 | ||
1397 | if (wm.cxsr && !dev_priv->wm.vlv.cxsr) | |
1398 | intel_set_memory_cxsr(dev_priv, true); | |
1399 | ||
1400 | if (wm.level >= VLV_WM_LEVEL_PM5 && | |
1401 | dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5) | |
1402 | chv_set_memory_pm5(dev_priv, true); | |
1403 | ||
1404 | if (wm.level >= VLV_WM_LEVEL_DDR_DVFS && | |
1405 | dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS) | |
1406 | chv_set_memory_dvfs(dev_priv, true); | |
1407 | ||
1408 | dev_priv->wm.vlv = wm; | |
1409 | } | |
1410 | ||
#define single_plane_enabled(mask) is_power_of_2(mask)
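
/*
 * is_power_of_2() on the pipe mask means "exactly one pipe enabled":
 * single_plane_enabled(1 << PIPE_A) is true, while
 * single_plane_enabled((1 << PIPE_A) | (1 << PIPE_B)) is not.
 */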
1412 | ||
1413 | static void g4x_update_wm(struct drm_crtc *crtc) | |
1414 | { | |
1415 | struct drm_device *dev = crtc->dev; | |
1416 | static const int sr_latency_ns = 12000; | |
1417 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1418 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | |
1419 | int plane_sr, cursor_sr; | |
1420 | unsigned int enabled = 0; | |
1421 | bool cxsr_enabled; | |
1422 | ||
1423 | if (g4x_compute_wm0(dev, PIPE_A, | |
1424 | &g4x_wm_info, pessimal_latency_ns, | |
1425 | &g4x_cursor_wm_info, pessimal_latency_ns, | |
1426 | &planea_wm, &cursora_wm)) | |
1427 | enabled |= 1 << PIPE_A; | |
1428 | ||
1429 | if (g4x_compute_wm0(dev, PIPE_B, | |
1430 | &g4x_wm_info, pessimal_latency_ns, | |
1431 | &g4x_cursor_wm_info, pessimal_latency_ns, | |
1432 | &planeb_wm, &cursorb_wm)) | |
1433 | enabled |= 1 << PIPE_B; | |
1434 | ||
1435 | if (single_plane_enabled(enabled) && | |
1436 | g4x_compute_srwm(dev, ffs(enabled) - 1, | |
1437 | sr_latency_ns, | |
1438 | &g4x_wm_info, | |
1439 | &g4x_cursor_wm_info, | |
1440 | &plane_sr, &cursor_sr)) { | |
1441 | cxsr_enabled = true; | |
1442 | } else { | |
1443 | cxsr_enabled = false; | |
1444 | intel_set_memory_cxsr(dev_priv, false); | |
1445 | plane_sr = cursor_sr = 0; | |
1446 | } | |
1447 | ||
1448 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " | |
1449 | "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", | |
1450 | planea_wm, cursora_wm, | |
1451 | planeb_wm, cursorb_wm, | |
1452 | plane_sr, cursor_sr); | |
1453 | ||
1454 | I915_WRITE(DSPFW1, | |
1455 | FW_WM(plane_sr, SR) | | |
1456 | FW_WM(cursorb_wm, CURSORB) | | |
1457 | FW_WM(planeb_wm, PLANEB) | | |
1458 | FW_WM(planea_wm, PLANEA)); | |
1459 | I915_WRITE(DSPFW2, | |
1460 | (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | | |
1461 | FW_WM(cursora_wm, CURSORA)); | |
1462 | /* HPLL off in SR has some issues on G4x... disable it */ | |
1463 | I915_WRITE(DSPFW3, | |
1464 | (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | | |
1465 | FW_WM(cursor_sr, CURSOR_SR)); | |
1466 | ||
1467 | if (cxsr_enabled) | |
1468 | intel_set_memory_cxsr(dev_priv, true); | |
1469 | } | |
1470 | ||
1471 | static void i965_update_wm(struct drm_crtc *unused_crtc) | |
1472 | { | |
1473 | struct drm_device *dev = unused_crtc->dev; | |
1474 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1475 | struct drm_crtc *crtc; | |
1476 | int srwm = 1; | |
1477 | int cursor_sr = 16; | |
1478 | bool cxsr_enabled; | |
1479 | ||
	/* Calc sr entries for single-plane configs */
1481 | crtc = single_enabled_crtc(dev); | |
1482 | if (crtc) { | |
1483 | /* self-refresh has much higher latency */ | |
1484 | static const int sr_latency_ns = 12000; | |
1485 | const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
1486 | int clock = adjusted_mode->crtc_clock; | |
1487 | int htotal = adjusted_mode->crtc_htotal; | |
1488 | int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; | |
1489 | int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; | |
1490 | unsigned long line_time_us; | |
1491 | int entries; | |
1492 | ||
1493 | line_time_us = max(htotal * 1000 / clock, 1); | |
1494 | ||
1495 | /* Use ns/us then divide to preserve precision */ | |
1496 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | |
1497 | pixel_size * hdisplay; | |
1498 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); | |
1499 | srwm = I965_FIFO_SIZE - entries; | |
1500 | if (srwm < 0) | |
1501 | srwm = 1; | |
1502 | srwm &= 0x1ff; | |
1503 | DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", | |
1504 | entries, srwm); | |
1505 | ||
1506 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | |
1507 | pixel_size * crtc->cursor->state->crtc_w; | |
1508 | entries = DIV_ROUND_UP(entries, | |
1509 | i965_cursor_wm_info.cacheline_size); | |
1510 | cursor_sr = i965_cursor_wm_info.fifo_size - | |
1511 | (entries + i965_cursor_wm_info.guard_size); | |
1512 | ||
1513 | if (cursor_sr > i965_cursor_wm_info.max_wm) | |
1514 | cursor_sr = i965_cursor_wm_info.max_wm; | |
1515 | ||
1516 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | |
1517 | "cursor %d\n", srwm, cursor_sr); | |
1518 | ||
1519 | cxsr_enabled = true; | |
1520 | } else { | |
1521 | cxsr_enabled = false; | |
1522 | /* Turn off self refresh if both pipes are enabled */ | |
1523 | intel_set_memory_cxsr(dev_priv, false); | |
1524 | } | |
1525 | ||
1526 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | |
1527 | srwm); | |
1528 | ||
1529 | /* 965 has limitations... */ | |
1530 | I915_WRITE(DSPFW1, FW_WM(srwm, SR) | | |
1531 | FW_WM(8, CURSORB) | | |
1532 | FW_WM(8, PLANEB) | | |
1533 | FW_WM(8, PLANEA)); | |
1534 | I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | | |
1535 | FW_WM(8, PLANEC_OLD)); | |
1536 | /* update cursor SR watermark */ | |
1537 | I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); | |
1538 | ||
1539 | if (cxsr_enabled) | |
1540 | intel_set_memory_cxsr(dev_priv, true); | |
1541 | } | |
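/*
 * Editor's note: an illustrative sketch (not driver code) of the
 * self-refresh arithmetic above, using assumed 1080p timings (148.5 MHz
 * dot clock, htotal 2200, 32bpp) and assumed i965 constants (512-entry
 * FIFO, 64-byte FIFO lines). All steps use integer math, as in the code.
 */
static int example_i965_srwm(void)
{
	const int sr_latency_ns = 12000;
	int clock = 148500, htotal = 2200, hdisplay = 1920, pixel_size = 4;
	int fifo_size = 512, line_size = 64;	/* assumed platform constants */
	int line_time_us, entries, srwm;

	line_time_us = htotal * 1000 / clock;		/* 14 us (truncated) */
	entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
		  pixel_size * hdisplay;		/* ((857+1000)/1000) * 4 * 1920 = 7680 */
	entries = (entries + line_size - 1) / line_size;	/* DIV_ROUND_UP -> 120 */
	srwm = fifo_size - entries;			/* 392 */
	if (srwm < 0)
		srwm = 1;
	return srwm & 0x1ff;
}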
1542 | ||
1543 | #undef FW_WM | |
1544 | ||
1545 | static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |
1546 | { | |
1547 | struct drm_device *dev = unused_crtc->dev; | |
1548 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1549 | const struct intel_watermark_params *wm_info; | |
1550 | uint32_t fwater_lo; | |
1551 | uint32_t fwater_hi; | |
1552 | int cwm, srwm = 1; | |
1553 | int fifo_size; | |
1554 | int planea_wm, planeb_wm; | |
1555 | struct drm_crtc *crtc, *enabled = NULL; | |
1556 | ||
1557 | if (IS_I945GM(dev)) | |
1558 | wm_info = &i945_wm_info; | |
1559 | else if (!IS_GEN2(dev)) | |
1560 | wm_info = &i915_wm_info; | |
1561 | else | |
1562 | wm_info = &i830_a_wm_info; | |
1563 | ||
1564 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); | |
1565 | crtc = intel_get_crtc_for_plane(dev, 0); | |
1566 | if (intel_crtc_active(crtc)) { | |
1567 | const struct drm_display_mode *adjusted_mode; | |
1568 | int cpp = crtc->primary->state->fb->bits_per_pixel / 8; | |
1569 | if (IS_GEN2(dev)) | |
1570 | cpp = 4; | |
1571 | ||
1572 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
1573 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | |
1574 | wm_info, fifo_size, cpp, | |
1575 | pessimal_latency_ns); | |
1576 | enabled = crtc; | |
1577 | } else { | |
1578 | planea_wm = fifo_size - wm_info->guard_size; | |
1579 | if (planea_wm > (long)wm_info->max_wm) | |
1580 | planea_wm = wm_info->max_wm; | |
1581 | } | |
1582 | ||
1583 | if (IS_GEN2(dev)) | |
1584 | wm_info = &i830_bc_wm_info; | |
1585 | ||
1586 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | |
1587 | crtc = intel_get_crtc_for_plane(dev, 1); | |
1588 | if (intel_crtc_active(crtc)) { | |
1589 | const struct drm_display_mode *adjusted_mode; | |
1590 | int cpp = crtc->primary->state->fb->bits_per_pixel / 8; | |
1591 | if (IS_GEN2(dev)) | |
1592 | cpp = 4; | |
1593 | ||
1594 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
1595 | planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | |
1596 | wm_info, fifo_size, cpp, | |
1597 | pessimal_latency_ns); | |
1598 | if (enabled == NULL) | |
1599 | enabled = crtc; | |
1600 | else | |
1601 | enabled = NULL; | |
1602 | } else { | |
1603 | planeb_wm = fifo_size - wm_info->guard_size; | |
1604 | if (planeb_wm > (long)wm_info->max_wm) | |
1605 | planeb_wm = wm_info->max_wm; | |
1606 | } | |
1607 | ||
1608 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | |
1609 | ||
1610 | if (IS_I915GM(dev) && enabled) { | |
1611 | struct drm_i915_gem_object *obj; | |
1612 | ||
1613 | obj = intel_fb_obj(enabled->primary->state->fb); | |
1614 | ||
1615 | /* self-refresh seems busted with untiled */ | |
1616 | if (obj->tiling_mode == I915_TILING_NONE) | |
1617 | enabled = NULL; | |
1618 | } | |
1619 | ||
1620 | /* | |
1621 | * Overlay gets an aggressive default since video jitter is bad. | |
1622 | */ | |
1623 | cwm = 2; | |
1624 | ||
1625 | /* Play safe and disable self-refresh before adjusting watermarks. */ | |
1626 | intel_set_memory_cxsr(dev_priv, false); | |
1627 | ||
1628 | /* Calculate self-refresh entries for single-plane configs */ | |
1629 | if (HAS_FW_BLC(dev) && enabled) { | |
1630 | /* self-refresh has much higher latency */ | |
1631 | static const int sr_latency_ns = 6000; | |
1632 | const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode; | |
1633 | int clock = adjusted_mode->crtc_clock; | |
1634 | int htotal = adjusted_mode->crtc_htotal; | |
1635 | int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; | |
1636 | int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8; | |
1637 | unsigned long line_time_us; | |
1638 | int entries; | |
1639 | ||
1640 | line_time_us = max(htotal * 1000 / clock, 1); | |
1641 | ||
1642 | /* Use ns/us then divide to preserve precision */ | |
1643 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | |
1644 | pixel_size * hdisplay; | |
1645 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); | |
1646 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); | |
1647 | srwm = wm_info->fifo_size - entries; | |
1648 | if (srwm < 0) | |
1649 | srwm = 1; | |
1650 | ||
1651 | if (IS_I945G(dev) || IS_I945GM(dev)) | |
1652 | I915_WRITE(FW_BLC_SELF, | |
1653 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | |
1654 | else if (IS_I915GM(dev)) | |
1655 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); | |
1656 | } | |
1657 | ||
1658 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | |
1659 | planea_wm, planeb_wm, cwm, srwm); | |
1660 | ||
1661 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | |
1662 | fwater_hi = (cwm & 0x1f); | |
1663 | ||
1664 | /* Set request length to 8 cachelines per fetch */ | |
1665 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); | |
1666 | fwater_hi = fwater_hi | (1 << 8); | |
1667 | ||
1668 | I915_WRITE(FW_BLC, fwater_lo); | |
1669 | I915_WRITE(FW_BLC2, fwater_hi); | |
1670 | ||
1671 | if (enabled) | |
1672 | intel_set_memory_cxsr(dev_priv, true); | |
1673 | } | |
1674 | ||
1675 | static void i845_update_wm(struct drm_crtc *unused_crtc) | |
1676 | { | |
1677 | struct drm_device *dev = unused_crtc->dev; | |
1678 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1679 | struct drm_crtc *crtc; | |
1680 | const struct drm_display_mode *adjusted_mode; | |
1681 | uint32_t fwater_lo; | |
1682 | int planea_wm; | |
1683 | ||
1684 | crtc = single_enabled_crtc(dev); | |
1685 | if (crtc == NULL) | |
1686 | return; | |
1687 | ||
1688 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | |
1689 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | |
1690 | &i845_wm_info, | |
1691 | dev_priv->display.get_fifo_size(dev, 0), | |
1692 | 4, pessimal_latency_ns); | |
1693 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | |
1694 | fwater_lo |= (3<<8) | planea_wm; | |
1695 | ||
1696 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); | |
1697 | ||
1698 | I915_WRITE(FW_BLC, fwater_lo); | |
1699 | } | |
1700 | ||
1701 | uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) | |
1702 | { | |
1703 | uint32_t pixel_rate; | |
1704 | ||
1705 | pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; | |
1706 | ||
1707 | /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to | |
1708 | * adjust the pixel_rate here. */ | |
1709 | ||
1710 | if (pipe_config->pch_pfit.enabled) { | |
1711 | uint64_t pipe_w, pipe_h, pfit_w, pfit_h; | |
1712 | uint32_t pfit_size = pipe_config->pch_pfit.size; | |
1713 | ||
1714 | pipe_w = pipe_config->pipe_src_w; | |
1715 | pipe_h = pipe_config->pipe_src_h; | |
1716 | ||
1717 | pfit_w = (pfit_size >> 16) & 0xFFFF; | |
1718 | pfit_h = pfit_size & 0xFFFF; | |
1719 | if (pipe_w < pfit_w) | |
1720 | pipe_w = pfit_w; | |
1721 | if (pipe_h < pfit_h) | |
1722 | pipe_h = pfit_h; | |
1723 | ||
1724 | pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, | |
1725 | pfit_w * pfit_h); | |
1726 | } | |
1727 | ||
1728 | return pixel_rate; | |
1729 | } | |
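/*
 * Editor's note: an illustrative sketch (not driver code) of the panel-fitter
 * adjustment above. With an assumed 3840x2160 pipe source downscaled into a
 * 1920x1080 fitter window, the effective pixel rate grows by the area ratio
 * (x4), since four source pixels are fetched per output pixel.
 */
static unsigned int example_pfit_pixel_rate(void)
{
	unsigned long long pixel_rate = 533250;	/* kHz, assumed mode clock */
	unsigned long long pipe_w = 3840, pipe_h = 2160;
	unsigned long long pfit_w = 1920, pfit_h = 1080;

	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	/* 533250 * (3840*2160) / (1920*1080) = 2133000 kHz */
	return pixel_rate * pipe_w * pipe_h / (pfit_w * pfit_h);
}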
1730 | ||
1731 | /* latency must be in 0.1us units. */ | |
1732 | static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, | |
1733 | uint32_t latency) | |
1734 | { | |
1735 | uint64_t ret; | |
1736 | ||
1737 | if (WARN(latency == 0, "Latency value missing\n")) | |
1738 | return UINT_MAX; | |
1739 | ||
1740 | ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; | |
1741 | ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; | |
1742 | ||
1743 | return ret; | |
1744 | } | |
1745 | ||
1746 | /* latency must be in 0.1us units. */ | |
1747 | static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, | |
1748 | uint32_t horiz_pixels, uint8_t bytes_per_pixel, | |
1749 | uint32_t latency) | |
1750 | { | |
1751 | uint32_t ret; | |
1752 | ||
1753 | if (WARN(latency == 0, "Latency value missing\n")) | |
1754 | return UINT_MAX; | |
1755 | ||
1756 | ret = (latency * pixel_rate) / (pipe_htotal * 10000); | |
1757 | ret = (ret + 1) * horiz_pixels * bytes_per_pixel; | |
1758 | ret = DIV_ROUND_UP(ret, 64) + 2; | |
1759 | return ret; | |
1760 | } | |
1761 | ||
1762 | static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, | |
1763 | uint8_t bytes_per_pixel) | |
1764 | { | |
1765 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; | |
1766 | } | |
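/*
 * Editor's note: a worked example (not driver code) of the two methods
 * above, with assumed numbers: 148.5 MHz pixel rate, 4 bytes per pixel,
 * htotal 2200, 1920 visible pixels and a latency of 7 (0.7 us). Method 1
 * only covers the raw memory latency; method 2 also rounds up to whole
 * lines of fetched data and is only consulted for LP watermarks, where
 * the smaller of the two results is used.
 */
static void example_ilk_wm_methods(void)
{
	/* method1: 148500 * 4 * 7 = 4158000; ceil(/ 640000) + 2 = 9 */
	unsigned long long m1 =
		(148500ULL * 4 * 7 + 64 * 10000 - 1) / (64 * 10000) + 2;

	/* method2: (7 * 148500) / (2200 * 10000) = 0 full lines;
	 * (0 + 1) * 1920 * 4 = 7680 bytes; ceil(/ 64) + 2 = 122
	 */
	unsigned int m2 = ((7 * 148500) / (2200 * 10000) + 1) * 1920 * 4;
	m2 = (m2 + 63) / 64 + 2;

	/* an LP watermark would therefore be min(m1, m2) = 9 */
	(void)m1;
	(void)m2;
}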
1767 | ||
1768 | struct skl_pipe_wm_parameters { | |
1769 | bool active; | |
1770 | uint32_t pipe_htotal; | |
1771 | uint32_t pixel_rate; /* in KHz */ | |
1772 | struct intel_plane_wm_parameters plane[I915_MAX_PLANES]; | |
1773 | struct intel_plane_wm_parameters cursor; | |
1774 | }; | |
1775 | ||
1776 | struct ilk_pipe_wm_parameters { | |
1777 | bool active; | |
1778 | uint32_t pipe_htotal; | |
1779 | uint32_t pixel_rate; | |
1780 | }; | |
1781 | ||
1782 | struct ilk_wm_maximums { | |
1783 | uint16_t pri; | |
1784 | uint16_t spr; | |
1785 | uint16_t cur; | |
1786 | uint16_t fbc; | |
1787 | }; | |
1788 | ||
1789 | /* used in computing the new watermarks state */ | |
1790 | struct intel_wm_config { | |
1791 | unsigned int num_pipes_active; | |
1792 | bool sprites_enabled; | |
1793 | bool sprites_scaled; | |
1794 | }; | |
1795 | ||
1796 | /* | |
1797 | * For both WM_PIPE and WM_LP. | |
1798 | * mem_value must be in 0.1us units. | |
1799 | */ | |
1800 | static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params, | |
1801 | const struct intel_plane_state *pstate, | |
1802 | uint32_t mem_value, | |
1803 | bool is_lp) | |
1804 | { | |
1805 | int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; | |
1806 | uint32_t method1, method2; | |
1807 | ||
1808 | if (!params->active || !pstate->visible) | |
1809 | return 0; | |
1810 | ||
1811 | method1 = ilk_wm_method1(params->pixel_rate, bpp, mem_value); | |
1812 | ||
1813 | if (!is_lp) | |
1814 | return method1; | |
1815 | ||
1816 | method2 = ilk_wm_method2(params->pixel_rate, | |
1817 | params->pipe_htotal, | |
1818 | drm_rect_width(&pstate->dst), | |
1819 | bpp, | |
1820 | mem_value); | |
1821 | ||
1822 | return min(method1, method2); | |
1823 | } | |
1824 | ||
1825 | /* | |
1826 | * For both WM_PIPE and WM_LP. | |
1827 | * mem_value must be in 0.1us units. | |
1828 | */ | |
1829 | static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params, | |
1830 | const struct intel_plane_state *pstate, | |
1831 | uint32_t mem_value) | |
1832 | { | |
1833 | int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; | |
1834 | uint32_t method1, method2; | |
1835 | ||
1836 | if (!params->active || !pstate->visible) | |
1837 | return 0; | |
1838 | ||
1839 | method1 = ilk_wm_method1(params->pixel_rate, bpp, mem_value); | |
1840 | method2 = ilk_wm_method2(params->pixel_rate, | |
1841 | params->pipe_htotal, | |
1842 | drm_rect_width(&pstate->dst), | |
1843 | bpp, | |
1844 | mem_value); | |
1845 | return min(method1, method2); | |
1846 | } | |
1847 | ||
1848 | /* | |
1849 | * For both WM_PIPE and WM_LP. | |
1850 | * mem_value must be in 0.1us units. | |
1851 | */ | |
1852 | static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params, | |
1853 | const struct intel_plane_state *pstate, | |
1854 | uint32_t mem_value) | |
1855 | { | |
1856 | int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; | |
1857 | ||
1858 | if (!params->active || !pstate->visible) | |
1859 | return 0; | |
1860 | ||
1861 | return ilk_wm_method2(params->pixel_rate, | |
1862 | params->pipe_htotal, | |
1863 | drm_rect_width(&pstate->dst), | |
1864 | bpp, | |
1865 | mem_value); | |
1866 | } | |
1867 | ||
1868 | /* Only for WM_LP. */ | |
1869 | static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params, | |
1870 | const struct intel_plane_state *pstate, | |
1871 | uint32_t pri_val) | |
1872 | { | |
1873 | int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; | |
1874 | ||
1875 | if (!params->active || !pstate->visible) | |
1876 | return 0; | |
1877 | ||
1878 | return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp); | |
1879 | } | |
1880 | ||
1881 | static unsigned int ilk_display_fifo_size(const struct drm_device *dev) | |
1882 | { | |
1883 | if (INTEL_INFO(dev)->gen >= 8) | |
1884 | return 3072; | |
1885 | else if (INTEL_INFO(dev)->gen >= 7) | |
1886 | return 768; | |
1887 | else | |
1888 | return 512; | |
1889 | } | |
1890 | ||
1891 | static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev, | |
1892 | int level, bool is_sprite) | |
1893 | { | |
1894 | if (INTEL_INFO(dev)->gen >= 8) | |
1895 | /* BDW primary/sprite plane watermarks */ | |
1896 | return level == 0 ? 255 : 2047; | |
1897 | else if (INTEL_INFO(dev)->gen >= 7) | |
1898 | /* IVB/HSW primary/sprite plane watermarks */ | |
1899 | return level == 0 ? 127 : 1023; | |
1900 | else if (!is_sprite) | |
1901 | /* ILK/SNB primary plane watermarks */ | |
1902 | return level == 0 ? 127 : 511; | |
1903 | else | |
1904 | /* ILK/SNB sprite plane watermarks */ | |
1905 | return level == 0 ? 63 : 255; | |
1906 | } | |
1907 | ||
1908 | static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev, | |
1909 | int level) | |
1910 | { | |
1911 | if (INTEL_INFO(dev)->gen >= 7) | |
1912 | return level == 0 ? 63 : 255; | |
1913 | else | |
1914 | return level == 0 ? 31 : 63; | |
1915 | } | |
1916 | ||
1917 | static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev) | |
1918 | { | |
1919 | if (INTEL_INFO(dev)->gen >= 8) | |
1920 | return 31; | |
1921 | else | |
1922 | return 15; | |
1923 | } | |
1924 | ||
1925 | /* Calculate the maximum primary/sprite plane watermark */ | |
1926 | static unsigned int ilk_plane_wm_max(const struct drm_device *dev, | |
1927 | int level, | |
1928 | const struct intel_wm_config *config, | |
1929 | enum intel_ddb_partitioning ddb_partitioning, | |
1930 | bool is_sprite) | |
1931 | { | |
1932 | unsigned int fifo_size = ilk_display_fifo_size(dev); | |
1933 | ||
1934 | /* if sprites aren't enabled, sprites get nothing */ | |
1935 | if (is_sprite && !config->sprites_enabled) | |
1936 | return 0; | |
1937 | ||
1938 | /* HSW allows LP1+ watermarks even with multiple pipes */ | |
1939 | if (level == 0 || config->num_pipes_active > 1) { | |
1940 | fifo_size /= INTEL_INFO(dev)->num_pipes; | |
1941 | ||
1942 | /* | |
1943 | * For some reason the non self refresh | |
1944 | * FIFO size is only half of the self | |
1945 | * refresh FIFO size on ILK/SNB. | |
1946 | */ | |
1947 | if (INTEL_INFO(dev)->gen <= 6) | |
1948 | fifo_size /= 2; | |
1949 | } | |
1950 | ||
1951 | if (config->sprites_enabled) { | |
1952 | /* level 0 is always calculated with 1:1 split */ | |
1953 | if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { | |
1954 | if (is_sprite) | |
1955 | fifo_size *= 5; | |
1956 | fifo_size /= 6; | |
1957 | } else { | |
1958 | fifo_size /= 2; | |
1959 | } | |
1960 | } | |
1961 | ||
1962 | /* clamp to max that the registers can hold */ | |
1963 | return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite)); | |
1964 | } | |
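/*
 * Editor's note: an illustrative walk-through (not driver code) of the
 * splitting above for an assumed SNB setup: 512-block display FIFO, two
 * active pipes, sprites enabled, 5:6 partitioning, level 1 (LP1).
 */
static void example_ilk_plane_wm_max(void)
{
	unsigned int fifo_size = 512;

	fifo_size /= 2;	/* shared between the two active pipes: 256 */
	fifo_size /= 2;	/* non-SR FIFO is half the SR FIFO on ILK/SNB: 128 */

	/* 5:6 split at level 1: the sprite may use 128 * 5 / 6 = 106 blocks
	 * and the primary 128 / 6 = 21; both fit under the register
	 * maximums (255 and 511 respectively), so no clamping occurs.
	 */
	(void)fifo_size;
}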
1965 | ||
1966 | /* Calculate the maximum cursor plane watermark */ | |
1967 | static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, | |
1968 | int level, | |
1969 | const struct intel_wm_config *config) | |
1970 | { | |
1971 | /* HSW LP1+ watermarks w/ multiple pipes */ | |
1972 | if (level > 0 && config->num_pipes_active > 1) | |
1973 | return 64; | |
1974 | ||
1975 | /* otherwise just report max that registers can hold */ | |
1976 | return ilk_cursor_wm_reg_max(dev, level); | |
1977 | } | |
1978 | ||
1979 | static void ilk_compute_wm_maximums(const struct drm_device *dev, | |
1980 | int level, | |
1981 | const struct intel_wm_config *config, | |
1982 | enum intel_ddb_partitioning ddb_partitioning, | |
1983 | struct ilk_wm_maximums *max) | |
1984 | { | |
1985 | max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); | |
1986 | max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); | |
1987 | max->cur = ilk_cursor_wm_max(dev, level, config); | |
1988 | max->fbc = ilk_fbc_wm_reg_max(dev); | |
1989 | } | |
1990 | ||
1991 | static void ilk_compute_wm_reg_maximums(struct drm_device *dev, | |
1992 | int level, | |
1993 | struct ilk_wm_maximums *max) | |
1994 | { | |
1995 | max->pri = ilk_plane_wm_reg_max(dev, level, false); | |
1996 | max->spr = ilk_plane_wm_reg_max(dev, level, true); | |
1997 | max->cur = ilk_cursor_wm_reg_max(dev, level); | |
1998 | max->fbc = ilk_fbc_wm_reg_max(dev); | |
1999 | } | |
2000 | ||
2001 | static bool ilk_validate_wm_level(int level, | |
2002 | const struct ilk_wm_maximums *max, | |
2003 | struct intel_wm_level *result) | |
2004 | { | |
2005 | bool ret; | |
2006 | ||
2007 | /* already determined to be invalid? */ | |
2008 | if (!result->enable) | |
2009 | return false; | |
2010 | ||
2011 | result->enable = result->pri_val <= max->pri && | |
2012 | result->spr_val <= max->spr && | |
2013 | result->cur_val <= max->cur; | |
2014 | ||
2015 | ret = result->enable; | |
2016 | ||
2017 | /* | |
2018 | * HACK until we can pre-compute everything, | |
2019 | * and thus fail gracefully if LP0 watermarks | |
2020 | * are exceeded... | |
2021 | */ | |
2022 | if (level == 0 && !result->enable) { | |
2023 | if (result->pri_val > max->pri) | |
2024 | DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", | |
2025 | level, result->pri_val, max->pri); | |
2026 | if (result->spr_val > max->spr) | |
2027 | DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", | |
2028 | level, result->spr_val, max->spr); | |
2029 | if (result->cur_val > max->cur) | |
2030 | DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", | |
2031 | level, result->cur_val, max->cur); | |
2032 | ||
2033 | result->pri_val = min_t(uint32_t, result->pri_val, max->pri); | |
2034 | result->spr_val = min_t(uint32_t, result->spr_val, max->spr); | |
2035 | result->cur_val = min_t(uint32_t, result->cur_val, max->cur); | |
2036 | result->enable = true; | |
2037 | } | |
2038 | ||
2039 | return ret; | |
2040 | } | |
2041 | ||
2042 | static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, | |
2043 | const struct intel_crtc *intel_crtc, | |
2044 | int level, | |
2045 | const struct ilk_pipe_wm_parameters *p, | |
2046 | struct intel_wm_level *result) | |
2047 | { | |
2048 | struct intel_plane *intel_plane; | |
2049 | uint16_t pri_latency = dev_priv->wm.pri_latency[level]; | |
2050 | uint16_t spr_latency = dev_priv->wm.spr_latency[level]; | |
2051 | uint16_t cur_latency = dev_priv->wm.cur_latency[level]; | |
2052 | ||
2053 | /* WM1+ latency values stored in 0.5us units */ | |
2054 | if (level > 0) { | |
2055 | pri_latency *= 5; | |
2056 | spr_latency *= 5; | |
2057 | cur_latency *= 5; | |
2058 | } | |
2059 | ||
2060 | for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) { | |
2061 | struct intel_plane_state *pstate = | |
2062 | to_intel_plane_state(intel_plane->base.state); | |
2063 | ||
2064 | switch (intel_plane->base.type) { | |
2065 | case DRM_PLANE_TYPE_PRIMARY: | |
2066 | result->pri_val = ilk_compute_pri_wm(p, pstate, | |
2067 | pri_latency, | |
2068 | level); | |
2069 | result->fbc_val = ilk_compute_fbc_wm(p, pstate, | |
2070 | result->pri_val); | |
2071 | break; | |
2072 | case DRM_PLANE_TYPE_OVERLAY: | |
2073 | result->spr_val = ilk_compute_spr_wm(p, pstate, | |
2074 | spr_latency); | |
2075 | break; | |
2076 | case DRM_PLANE_TYPE_CURSOR: | |
2077 | result->cur_val = ilk_compute_cur_wm(p, pstate, | |
2078 | cur_latency); | |
2079 | break; | |
2080 | } | |
2081 | } | |
2082 | ||
2083 | result->enable = true; | |
2084 | } | |
2085 | ||
2086 | static uint32_t | |
2087 | hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) | |
2088 | { | |
2089 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2090 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2091 | const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; | |
2092 | u32 linetime, ips_linetime; | |
2093 | ||
2094 | if (!intel_crtc->active) | |
2095 | return 0; | |
2096 | ||
2097 | /* The watermarks are computed based on how long it takes to fill a | |
2098 | * single row at the given clock rate, multiplied by 8. | |
2099 | */ | |
2100 | linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, | |
2101 | adjusted_mode->crtc_clock); | |
2102 | ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, | |
2103 | dev_priv->cdclk_freq); | |
2104 | ||
2105 | return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | | |
2106 | PIPE_WM_LINETIME_TIME(linetime); | |
2107 | } | |
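/*
 * Editor's note: illustrative numbers (not driver code) for the linetime
 * fields above, assuming htotal 2200 at a 148.5 MHz pixel clock and a
 * 450 MHz cdclk. Both fields store the line scan time in 1/8 us units.
 */
static void example_hsw_linetime(void)
{
	/* round(2200 * 1000 * 8 / 148500) = 119, i.e. ~14.9 us per line */
	unsigned int linetime = (2200 * 1000 * 8 + 148500 / 2) / 148500;

	/* the same line measured against cdclk: round(17600000 / 450000) = 39 */
	unsigned int ips_linetime = (2200 * 1000 * 8 + 450000 / 2) / 450000;

	(void)linetime;
	(void)ips_linetime;
}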
2108 | ||
2109 | static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) | |
2110 | { | |
2111 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2112 | ||
2113 | if (IS_GEN9(dev)) { | |
2114 | uint32_t val; | |
2115 | int ret, i; | |
2116 | int level, max_level = ilk_wm_max_level(dev); | |
2117 | ||
2118 | /* read the first set of memory latencies[0:3] */ | |
2119 | val = 0; /* data0 to be programmed to 0 for first set */ | |
2120 | mutex_lock(&dev_priv->rps.hw_lock); | |
2121 | ret = sandybridge_pcode_read(dev_priv, | |
2122 | GEN9_PCODE_READ_MEM_LATENCY, | |
2123 | &val); | |
2124 | mutex_unlock(&dev_priv->rps.hw_lock); | |
2125 | ||
2126 | if (ret) { | |
2127 | DRM_ERROR("SKL Mailbox read error = %d\n", ret); | |
2128 | return; | |
2129 | } | |
2130 | ||
2131 | wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; | |
2132 | wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & | |
2133 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2134 | wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & | |
2135 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2136 | wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & | |
2137 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2138 | ||
2139 | /* read the second set of memory latencies[4:7] */ | |
2140 | val = 1; /* data0 to be programmed to 1 for second set */ | |
2141 | mutex_lock(&dev_priv->rps.hw_lock); | |
2142 | ret = sandybridge_pcode_read(dev_priv, | |
2143 | GEN9_PCODE_READ_MEM_LATENCY, | |
2144 | &val); | |
2145 | mutex_unlock(&dev_priv->rps.hw_lock); | |
2146 | if (ret) { | |
2147 | DRM_ERROR("SKL Mailbox read error = %d\n", ret); | |
2148 | return; | |
2149 | } | |
2150 | ||
2151 | wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; | |
2152 | wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & | |
2153 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2154 | wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & | |
2155 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2156 | wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & | |
2157 | GEN9_MEM_LATENCY_LEVEL_MASK; | |
2158 | ||
2159 | /* | |
2160 | * WaWmMemoryReadLatency:skl | |
2161 | * | |
2162 | * punit doesn't take into account the read latency so we need | |
2163 | * to add 2us to the various latency levels we retrieve from | |
2164 | * the punit. | |
2165 | * - WM0 is a bit special in that it's the only level that | |
2166 | * can't be disabled if we want to have display working, so | |
2167 | * we always add 2us there. | |
2168 | * - For levels >=1, punit returns 0us latency when they are | |
2169 | * disabled, so we respect that and don't add 2us then | |
2170 | * | |
2171 | * Additionally, if a level n (n > 1) has a 0us latency, all | |
2172 | * levels m (m >= n) need to be disabled. We make sure to | |
2173 | * sanitize the values out of the punit to satisfy this | |
2174 | * requirement. | |
2175 | */ | |
2176 | wm[0] += 2; | |
2177 | for (level = 1; level <= max_level; level++) | |
2178 | if (wm[level] != 0) | |
2179 | wm[level] += 2; | |
2180 | else { | |
2181 | for (i = level + 1; i <= max_level; i++) | |
2182 | wm[i] = 0; | |
2183 | ||
2184 | break; | |
2185 | } | |
2186 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | |
2187 | uint64_t sskpd = I915_READ64(MCH_SSKPD); | |
2188 | ||
2189 | wm[0] = (sskpd >> 56) & 0xFF; | |
2190 | if (wm[0] == 0) | |
2191 | wm[0] = sskpd & 0xF; | |
2192 | wm[1] = (sskpd >> 4) & 0xFF; | |
2193 | wm[2] = (sskpd >> 12) & 0xFF; | |
2194 | wm[3] = (sskpd >> 20) & 0x1FF; | |
2195 | wm[4] = (sskpd >> 32) & 0x1FF; | |
2196 | } else if (INTEL_INFO(dev)->gen >= 6) { | |
2197 | uint32_t sskpd = I915_READ(MCH_SSKPD); | |
2198 | ||
2199 | wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; | |
2200 | wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; | |
2201 | wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; | |
2202 | wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; | |
2203 | } else if (INTEL_INFO(dev)->gen >= 5) { | |
2204 | uint32_t mltr = I915_READ(MLTR_ILK); | |
2205 | ||
2206 | /* ILK primary LP0 latency is 700 ns */ | |
2207 | wm[0] = 7; | |
2208 | wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; | |
2209 | wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; | |
2210 | } | |
2211 | } | |
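/*
 * Editor's note: an illustrative decode (not driver code) of one gen9
 * mailbox reply. Each GEN9_PCODE_READ_MEM_LATENCY read packs four 8-bit
 * latency values into one dword; the raw value below is made up.
 */
static void example_gen9_latency_decode(void)
{
	unsigned int val = 0x20181008;	/* hypothetical pcode reply */
	unsigned short wm[4];

	wm[0] = val & 0xff;		/* 0x08 ->  8 us */
	wm[1] = (val >> 8) & 0xff;	/* 0x10 -> 16 us */
	wm[2] = (val >> 16) & 0xff;	/* 0x18 -> 24 us */
	wm[3] = (val >> 24) & 0xff;	/* 0x20 -> 32 us */

	/* WaWmMemoryReadLatency then adds 2 us to WM0 and to every other
	 * non-zero level.
	 */
	(void)wm;
}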
2212 | ||
2213 | static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) | |
2214 | { | |
2215 | /* ILK sprite LP0 latency is 1300 ns */ | |
2216 | if (INTEL_INFO(dev)->gen == 5) | |
2217 | wm[0] = 13; | |
2218 | } | |
2219 | ||
2220 | static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) | |
2221 | { | |
2222 | /* ILK cursor LP0 latency is 1300 ns */ | |
2223 | if (INTEL_INFO(dev)->gen == 5) | |
2224 | wm[0] = 13; | |
2225 | ||
2226 | /* WaDoubleCursorLP3Latency:ivb */ | |
2227 | if (IS_IVYBRIDGE(dev)) | |
2228 | wm[3] *= 2; | |
2229 | } | |
2230 | ||
2231 | int ilk_wm_max_level(const struct drm_device *dev) | |
2232 | { | |
2233 | /* how many WM levels are we expecting */ | |
2234 | if (INTEL_INFO(dev)->gen >= 9) | |
2235 | return 7; | |
2236 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
2237 | return 4; | |
2238 | else if (INTEL_INFO(dev)->gen >= 6) | |
2239 | return 3; | |
2240 | else | |
2241 | return 2; | |
2242 | } | |
2243 | ||
2244 | static void intel_print_wm_latency(struct drm_device *dev, | |
2245 | const char *name, | |
2246 | const uint16_t wm[8]) | |
2247 | { | |
2248 | int level, max_level = ilk_wm_max_level(dev); | |
2249 | ||
2250 | for (level = 0; level <= max_level; level++) { | |
2251 | unsigned int latency = wm[level]; | |
2252 | ||
2253 | if (latency == 0) { | |
2254 | DRM_ERROR("%s WM%d latency not provided\n", | |
2255 | name, level); | |
2256 | continue; | |
2257 | } | |
2258 | ||
2259 | /* | |
2260 | * - latencies are in us on gen9. | |
2261 | * - before then, WM1+ latency values are in 0.5us units | |
2262 | */ | |
2263 | if (IS_GEN9(dev)) | |
2264 | latency *= 10; | |
2265 | else if (level > 0) | |
2266 | latency *= 5; | |
2267 | ||
2268 | DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", | |
2269 | name, level, wm[level], | |
2270 | latency / 10, latency % 10); | |
2271 | } | |
2272 | } | |
2273 | ||
2274 | static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, | |
2275 | uint16_t wm[5], uint16_t min) | |
2276 | { | |
2277 | int level, max_level = ilk_wm_max_level(dev_priv->dev); | |
2278 | ||
2279 | if (wm[0] >= min) | |
2280 | return false; | |
2281 | ||
2282 | wm[0] = max(wm[0], min); | |
2283 | for (level = 1; level <= max_level; level++) | |
2284 | wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); | |
2285 | ||
2286 | return true; | |
2287 | } | |
2288 | ||
2289 | static void snb_wm_latency_quirk(struct drm_device *dev) | |
2290 | { | |
2291 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2292 | bool changed; | |
2293 | ||
2294 | /* | |
2295 | * The BIOS provided WM memory latency values are often | |
2296 | * inadequate for high resolution displays. Adjust them. | |
2297 | */ | |
2298 | changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | | |
2299 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | | |
2300 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); | |
2301 | ||
2302 | if (!changed) | |
2303 | return; | |
2304 | ||
2305 | DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); | |
2306 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); | |
2307 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); | |
2308 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | |
2309 | } | |
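/*
 * Editor's note: an illustrative before/after (not driver code) for the
 * quirk above, on an assumed SNB latency set. WM0 is stored in 0.1 us
 * units and WM1+ in 0.5 us units, so the 1.2 us floor becomes 12 for WM0
 * and ceil(12 / 5) = 3 for the higher levels.
 */
static void example_snb_latency_quirk(void)
{
	unsigned short wm[4] = { 7, 2, 16, 32 };	/* assumed BIOS values */
	int level;

	if (wm[0] < 12) {
		wm[0] = 12;			/* 0.7 us -> 1.2 us */
		for (level = 1; level < 4; level++)
			if (wm[level] < 3)
				wm[level] = 3;	/* at least 1.5 us */
	}
	/* result: { 12, 3, 16, 32 } */
	(void)wm;
}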
2310 | ||
2311 | static void ilk_setup_wm_latency(struct drm_device *dev) | |
2312 | { | |
2313 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2314 | ||
2315 | intel_read_wm_latency(dev, dev_priv->wm.pri_latency); | |
2316 | ||
2317 | memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, | |
2318 | sizeof(dev_priv->wm.pri_latency)); | |
2319 | memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, | |
2320 | sizeof(dev_priv->wm.pri_latency)); | |
2321 | ||
2322 | intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency); | |
2323 | intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency); | |
2324 | ||
2325 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); | |
2326 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); | |
2327 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | |
2328 | ||
2329 | if (IS_GEN6(dev)) | |
2330 | snb_wm_latency_quirk(dev); | |
2331 | } | |
2332 | ||
2333 | static void skl_setup_wm_latency(struct drm_device *dev) | |
2334 | { | |
2335 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2336 | ||
2337 | intel_read_wm_latency(dev, dev_priv->wm.skl_latency); | |
2338 | intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); | |
2339 | } | |
2340 | ||
2341 | static void ilk_compute_wm_parameters(struct drm_crtc *crtc, | |
2342 | struct ilk_pipe_wm_parameters *p) | |
2343 | { | |
2344 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2345 | ||
2346 | if (!intel_crtc->active) | |
2347 | return; | |
2348 | ||
2349 | p->active = true; | |
2350 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; | |
2351 | p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config); | |
2352 | } | |
2353 | ||
2354 | static void ilk_compute_wm_config(struct drm_device *dev, | |
2355 | struct intel_wm_config *config) | |
2356 | { | |
2357 | struct intel_crtc *intel_crtc; | |
2358 | ||
2359 | /* Compute the currently _active_ config */ | |
2360 | for_each_intel_crtc(dev, intel_crtc) { | |
2361 | const struct intel_pipe_wm *wm = &intel_crtc->wm.active; | |
2362 | ||
2363 | if (!wm->pipe_enabled) | |
2364 | continue; | |
2365 | ||
2366 | config->sprites_enabled |= wm->sprites_enabled; | |
2367 | config->sprites_scaled |= wm->sprites_scaled; | |
2368 | config->num_pipes_active++; | |
2369 | } | |
2370 | } | |
2371 | ||
2372 | /* Compute new watermarks for the pipe */ | |
2373 | static bool intel_compute_pipe_wm(struct drm_crtc *crtc, | |
2374 | const struct ilk_pipe_wm_parameters *params, | |
2375 | struct intel_pipe_wm *pipe_wm) | |
2376 | { | |
2377 | struct drm_device *dev = crtc->dev; | |
2378 | const struct drm_i915_private *dev_priv = dev->dev_private; | |
2379 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2380 | struct intel_plane *intel_plane; | |
2381 | struct intel_plane_state *sprstate = NULL; | |
2382 | int level, max_level = ilk_wm_max_level(dev); | |
2383 | /* LP0 watermark maximums depend on this pipe alone */ | |
2384 | struct intel_wm_config config = { | |
2385 | .num_pipes_active = 1, | |
2386 | }; | |
2387 | struct ilk_wm_maximums max; | |
2388 | ||
2389 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | |
2390 | if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) { | |
2391 | sprstate = to_intel_plane_state(intel_plane->base.state); | |
2392 | break; | |
2393 | } | |
2394 | } | |
2395 | ||
2396 | config.sprites_enabled = sprstate->visible; | |
2397 | config.sprites_scaled = sprstate->visible && | |
2398 | (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 || | |
2399 | drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); | |
2400 | ||
2401 | ||
2402 | pipe_wm->pipe_enabled = params->active; | |
2403 | pipe_wm->sprites_enabled = sprstate->visible; | |
2404 | pipe_wm->sprites_scaled = config.sprites_scaled; | |
2405 | ||
2406 | /* ILK/SNB: LP2+ watermarks only w/o sprites */ | |
2407 | if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible) | |
2408 | max_level = 1; | |
2409 | ||
2410 | /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ | |
2411 | if (config.sprites_scaled) | |
2412 | max_level = 0; | |
2413 | ||
2414 | ilk_compute_wm_level(dev_priv, intel_crtc, 0, params, &pipe_wm->wm[0]); | |
2415 | ||
2416 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
2417 | pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); | |
2418 | ||
2419 | /* LP0 watermarks always use 1/2 DDB partitioning */ | |
2420 | ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); | |
2421 | ||
2422 | /* At least LP0 must be valid */ | |
2423 | if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) | |
2424 | return false; | |
2425 | ||
2426 | ilk_compute_wm_reg_maximums(dev, 1, &max); | |
2427 | ||
2428 | for (level = 1; level <= max_level; level++) { | |
2429 | struct intel_wm_level wm = {}; | |
2430 | ||
2431 | ilk_compute_wm_level(dev_priv, intel_crtc, level, params, &wm); | |
2432 | ||
2433 | /* | |
2434 | * Disable any watermark level that exceeds the | |
2435 | * register maximums since such watermarks are | |
2436 | * always invalid. | |
2437 | */ | |
2438 | if (!ilk_validate_wm_level(level, &max, &wm)) | |
2439 | break; | |
2440 | ||
2441 | pipe_wm->wm[level] = wm; | |
2442 | } | |
2443 | ||
2444 | return true; | |
2445 | } | |
2446 | ||
2447 | /* | |
2448 | * Merge the watermarks from all active pipes for a specific level. | |
2449 | */ | |
2450 | static void ilk_merge_wm_level(struct drm_device *dev, | |
2451 | int level, | |
2452 | struct intel_wm_level *ret_wm) | |
2453 | { | |
2454 | const struct intel_crtc *intel_crtc; | |
2455 | ||
2456 | ret_wm->enable = true; | |
2457 | ||
2458 | for_each_intel_crtc(dev, intel_crtc) { | |
2459 | const struct intel_pipe_wm *active = &intel_crtc->wm.active; | |
2460 | const struct intel_wm_level *wm = &active->wm[level]; | |
2461 | ||
2462 | if (!active->pipe_enabled) | |
2463 | continue; | |
2464 | ||
2465 | /* | |
2466 | * The watermark values may have been used in the past, | |
2467 | * so we must maintain them in the registers for some | |
2468 | * time even if the level is now disabled. | |
2469 | */ | |
2470 | if (!wm->enable) | |
2471 | ret_wm->enable = false; | |
2472 | ||
2473 | ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); | |
2474 | ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); | |
2475 | ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); | |
2476 | ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); | |
2477 | } | |
2478 | } | |
2479 | ||
2480 | /* | |
2481 | * Merge all low power watermarks for all active pipes. | |
2482 | */ | |
2483 | static void ilk_wm_merge(struct drm_device *dev, | |
2484 | const struct intel_wm_config *config, | |
2485 | const struct ilk_wm_maximums *max, | |
2486 | struct intel_pipe_wm *merged) | |
2487 | { | |
2488 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2489 | int level, max_level = ilk_wm_max_level(dev); | |
2490 | int last_enabled_level = max_level; | |
2491 | ||
2492 | /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ | |
2493 | if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && | |
2494 | config->num_pipes_active > 1) | |
2495 | return; | |
2496 | ||
2497 | /* ILK: FBC WM must be disabled always */ | |
2498 | merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; | |
2499 | ||
2500 | /* merge each WM1+ level */ | |
2501 | for (level = 1; level <= max_level; level++) { | |
2502 | struct intel_wm_level *wm = &merged->wm[level]; | |
2503 | ||
2504 | ilk_merge_wm_level(dev, level, wm); | |
2505 | ||
2506 | if (level > last_enabled_level) | |
2507 | wm->enable = false; | |
2508 | else if (!ilk_validate_wm_level(level, max, wm)) | |
2509 | /* make sure all following levels get disabled */ | |
2510 | last_enabled_level = level - 1; | |
2511 | ||
2512 | /* | |
2513 | * The spec says it is preferred to disable | |
2514 | * FBC WMs instead of disabling a WM level. | |
2515 | */ | |
2516 | if (wm->fbc_val > max->fbc) { | |
2517 | if (wm->enable) | |
2518 | merged->fbc_wm_enabled = false; | |
2519 | wm->fbc_val = 0; | |
2520 | } | |
2521 | } | |
2522 | ||
2523 | /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ | |
2524 | /* | |
2525 | * FIXME this is racy. FBC might get enabled later. | |
2526 | * What we should check here is whether FBC can be | |
2527 | * enabled sometime later. | |
2528 | */ | |
2529 | if (IS_GEN5(dev) && !merged->fbc_wm_enabled && | |
2530 | intel_fbc_enabled(dev_priv)) { | |
2531 | for (level = 2; level <= max_level; level++) { | |
2532 | struct intel_wm_level *wm = &merged->wm[level]; | |
2533 | ||
2534 | wm->enable = false; | |
2535 | } | |
2536 | } | |
2537 | } | |
2538 | ||
2539 | static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) | |
2540 | { | |
2541 | /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ | |
2542 | return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); | |
2543 | } | |
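/*
 * Editor's note: the mapping above spelled out (not driver code). When the
 * deepest level is usable (wm[4].enable, i.e. HSW/BDW with all levels valid)
 * the three LP registers skip level 2; otherwise they map straight through.
 */
static int example_wm_lp_to_level(int wm_lp, int level4_enabled)
{
	/* level4_enabled: WM1_LP -> 1, WM2_LP -> 3, WM3_LP -> 4
	 * otherwise:      WM1_LP -> 1, WM2_LP -> 2, WM3_LP -> 3
	 */
	return wm_lp + (wm_lp >= 2 && level4_enabled);
}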
2544 | ||
2545 | /* The value we need to program into the WM_LPx latency field */ | |
2546 | static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) | |
2547 | { | |
2548 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2549 | ||
2550 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
2551 | return 2 * level; | |
2552 | else | |
2553 | return dev_priv->wm.pri_latency[level]; | |
2554 | } | |
2555 | ||
2556 | static void ilk_compute_wm_results(struct drm_device *dev, | |
2557 | const struct intel_pipe_wm *merged, | |
2558 | enum intel_ddb_partitioning partitioning, | |
2559 | struct ilk_wm_values *results) | |
2560 | { | |
2561 | struct intel_crtc *intel_crtc; | |
2562 | int level, wm_lp; | |
2563 | ||
2564 | results->enable_fbc_wm = merged->fbc_wm_enabled; | |
2565 | results->partitioning = partitioning; | |
2566 | ||
2567 | /* LP1+ register values */ | |
2568 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | |
2569 | const struct intel_wm_level *r; | |
2570 | ||
2571 | level = ilk_wm_lp_to_level(wm_lp, merged); | |
2572 | ||
2573 | r = &merged->wm[level]; | |
2574 | ||
2575 | /* | |
2576 | * Maintain the watermark values even if the level is | |
2577 | * disabled. Doing otherwise could cause underruns. | |
2578 | */ | |
2579 | results->wm_lp[wm_lp - 1] = | |
2580 | (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | | |
2581 | (r->pri_val << WM1_LP_SR_SHIFT) | | |
2582 | r->cur_val; | |
2583 | ||
2584 | if (r->enable) | |
2585 | results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; | |
2586 | ||
2587 | if (INTEL_INFO(dev)->gen >= 8) | |
2588 | results->wm_lp[wm_lp - 1] |= | |
2589 | r->fbc_val << WM1_LP_FBC_SHIFT_BDW; | |
2590 | else | |
2591 | results->wm_lp[wm_lp - 1] |= | |
2592 | r->fbc_val << WM1_LP_FBC_SHIFT; | |
2593 | ||
2594 | /* | |
2595 | * Always set WM1S_LP_EN when spr_val != 0, even if the | |
2596 | * level is disabled. Doing otherwise could cause underruns. | |
2597 | */ | |
2598 | if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) { | |
2599 | WARN_ON(wm_lp != 1); | |
2600 | results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; | |
2601 | } else | |
2602 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; | |
2603 | } | |
2604 | ||
2605 | /* LP0 register values */ | |
2606 | for_each_intel_crtc(dev, intel_crtc) { | |
2607 | enum pipe pipe = intel_crtc->pipe; | |
2608 | const struct intel_wm_level *r = | |
2609 | &intel_crtc->wm.active.wm[0]; | |
2610 | ||
2611 | if (WARN_ON(!r->enable)) | |
2612 | continue; | |
2613 | ||
2614 | results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; | |
2615 | ||
2616 | results->wm_pipe[pipe] = | |
2617 | (r->pri_val << WM0_PIPE_PLANE_SHIFT) | | |
2618 | (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | | |
2619 | r->cur_val; | |
2620 | } | |
2621 | } | |
2622 | ||
2623 | /* Find the result with the highest level enabled. If both peak at the same | |
2624 | * level, prefer the one with FBC watermarks enabled, and r1 on a tie. */ | |
2625 | static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, | |
2626 | struct intel_pipe_wm *r1, | |
2627 | struct intel_pipe_wm *r2) | |
2628 | { | |
2629 | int level, max_level = ilk_wm_max_level(dev); | |
2630 | int level1 = 0, level2 = 0; | |
2631 | ||
2632 | for (level = 1; level <= max_level; level++) { | |
2633 | if (r1->wm[level].enable) | |
2634 | level1 = level; | |
2635 | if (r2->wm[level].enable) | |
2636 | level2 = level; | |
2637 | } | |
2638 | ||
2639 | if (level1 == level2) { | |
2640 | if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) | |
2641 | return r2; | |
2642 | else | |
2643 | return r1; | |
2644 | } else if (level1 > level2) { | |
2645 | return r1; | |
2646 | } else { | |
2647 | return r2; | |
2648 | } | |
2649 | } | |
2650 | ||
2651 | /* dirty bits used to track which watermarks need changes */ | |
2652 | #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) | |
2653 | #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) | |
2654 | #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) | |
2655 | #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) | |
2656 | #define WM_DIRTY_FBC (1 << 24) | |
2657 | #define WM_DIRTY_DDB (1 << 25) | |
2658 | ||
2659 | static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, | |
2660 | const struct ilk_wm_values *old, | |
2661 | const struct ilk_wm_values *new) | |
2662 | { | |
2663 | unsigned int dirty = 0; | |
2664 | enum pipe pipe; | |
2665 | int wm_lp; | |
2666 | ||
2667 | for_each_pipe(dev_priv, pipe) { | |
2668 | if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { | |
2669 | dirty |= WM_DIRTY_LINETIME(pipe); | |
2670 | /* Must disable LP1+ watermarks too */ | |
2671 | dirty |= WM_DIRTY_LP_ALL; | |
2672 | } | |
2673 | ||
2674 | if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { | |
2675 | dirty |= WM_DIRTY_PIPE(pipe); | |
2676 | /* Must disable LP1+ watermarks too */ | |
2677 | dirty |= WM_DIRTY_LP_ALL; | |
2678 | } | |
2679 | } | |
2680 | ||
2681 | if (old->enable_fbc_wm != new->enable_fbc_wm) { | |
2682 | dirty |= WM_DIRTY_FBC; | |
2683 | /* Must disable LP1+ watermarks too */ | |
2684 | dirty |= WM_DIRTY_LP_ALL; | |
2685 | } | |
2686 | ||
2687 | if (old->partitioning != new->partitioning) { | |
2688 | dirty |= WM_DIRTY_DDB; | |
2689 | /* Must disable LP1+ watermarks too */ | |
2690 | dirty |= WM_DIRTY_LP_ALL; | |
2691 | } | |
2692 | ||
2693 | /* LP1+ watermarks already deemed dirty, no need to continue */ | |
2694 | if (dirty & WM_DIRTY_LP_ALL) | |
2695 | return dirty; | |
2696 | ||
2697 | /* Find the lowest numbered LP1+ watermark in need of an update... */ | |
2698 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | |
2699 | if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || | |
2700 | old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) | |
2701 | break; | |
2702 | } | |
2703 | ||
2704 | /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ | |
2705 | for (; wm_lp <= 3; wm_lp++) | |
2706 | dirty |= WM_DIRTY_LP(wm_lp); | |
2707 | ||
2708 | return dirty; | |
2709 | } | |
2710 | ||
2711 | static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, | |
2712 | unsigned int dirty) | |
2713 | { | |
2714 | struct ilk_wm_values *previous = &dev_priv->wm.hw; | |
2715 | bool changed = false; | |
2716 | ||
2717 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { | |
2718 | previous->wm_lp[2] &= ~WM1_LP_SR_EN; | |
2719 | I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); | |
2720 | changed = true; | |
2721 | } | |
2722 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { | |
2723 | previous->wm_lp[1] &= ~WM1_LP_SR_EN; | |
2724 | I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); | |
2725 | changed = true; | |
2726 | } | |
2727 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { | |
2728 | previous->wm_lp[0] &= ~WM1_LP_SR_EN; | |
2729 | I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); | |
2730 | changed = true; | |
2731 | } | |
2732 | ||
2733 | /* | |
2734 | * Don't touch WM1S_LP_EN here. | |
2735 | * Doing so could cause underruns. | |
2736 | */ | |
2737 | ||
2738 | return changed; | |
2739 | } | |
2740 | ||
2741 | /* | |
2742 | * The spec says we should avoid writing when nothing changed, because every | |
2743 | * write causes the WMs to be re-evaluated, expending some power. | |
2744 | */ | |
2745 | static void ilk_write_wm_values(struct drm_i915_private *dev_priv, | |
2746 | struct ilk_wm_values *results) | |
2747 | { | |
2748 | struct drm_device *dev = dev_priv->dev; | |
2749 | struct ilk_wm_values *previous = &dev_priv->wm.hw; | |
2750 | unsigned int dirty; | |
2751 | uint32_t val; | |
2752 | ||
2753 | dirty = ilk_compute_wm_dirty(dev_priv, previous, results); | |
2754 | if (!dirty) | |
2755 | return; | |
2756 | ||
2757 | _ilk_disable_lp_wm(dev_priv, dirty); | |
2758 | ||
2759 | if (dirty & WM_DIRTY_PIPE(PIPE_A)) | |
2760 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); | |
2761 | if (dirty & WM_DIRTY_PIPE(PIPE_B)) | |
2762 | I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); | |
2763 | if (dirty & WM_DIRTY_PIPE(PIPE_C)) | |
2764 | I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); | |
2765 | ||
2766 | if (dirty & WM_DIRTY_LINETIME(PIPE_A)) | |
2767 | I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); | |
2768 | if (dirty & WM_DIRTY_LINETIME(PIPE_B)) | |
2769 | I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); | |
2770 | if (dirty & WM_DIRTY_LINETIME(PIPE_C)) | |
2771 | I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); | |
2772 | ||
2773 | if (dirty & WM_DIRTY_DDB) { | |
2774 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | |
2775 | val = I915_READ(WM_MISC); | |
2776 | if (results->partitioning == INTEL_DDB_PART_1_2) | |
2777 | val &= ~WM_MISC_DATA_PARTITION_5_6; | |
2778 | else | |
2779 | val |= WM_MISC_DATA_PARTITION_5_6; | |
2780 | I915_WRITE(WM_MISC, val); | |
2781 | } else { | |
2782 | val = I915_READ(DISP_ARB_CTL2); | |
2783 | if (results->partitioning == INTEL_DDB_PART_1_2) | |
2784 | val &= ~DISP_DATA_PARTITION_5_6; | |
2785 | else | |
2786 | val |= DISP_DATA_PARTITION_5_6; | |
2787 | I915_WRITE(DISP_ARB_CTL2, val); | |
2788 | } | |
2789 | } | |
2790 | ||
2791 | if (dirty & WM_DIRTY_FBC) { | |
2792 | val = I915_READ(DISP_ARB_CTL); | |
2793 | if (results->enable_fbc_wm) | |
2794 | val &= ~DISP_FBC_WM_DIS; | |
2795 | else | |
2796 | val |= DISP_FBC_WM_DIS; | |
2797 | I915_WRITE(DISP_ARB_CTL, val); | |
2798 | } | |
2799 | ||
2800 | if (dirty & WM_DIRTY_LP(1) && | |
2801 | previous->wm_lp_spr[0] != results->wm_lp_spr[0]) | |
2802 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); | |
2803 | ||
2804 | if (INTEL_INFO(dev)->gen >= 7) { | |
2805 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) | |
2806 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); | |
2807 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) | |
2808 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); | |
2809 | } | |
2810 | ||
2811 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) | |
2812 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); | |
2813 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) | |
2814 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); | |
2815 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) | |
2816 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); | |
2817 | ||
2818 | dev_priv->wm.hw = *results; | |
2819 | } | |
2820 | ||
2821 | static bool ilk_disable_lp_wm(struct drm_device *dev) | |
2822 | { | |
2823 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2824 | ||
2825 | return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); | |
2826 | } | |
2827 | ||
2828 | /* | |
2829 | * On gen9, we need to allocate Display Data Buffer (DDB) portions to the | |
2830 | * different active planes. | |
2831 | */ | |
2832 | ||
2833 | #define SKL_DDB_SIZE 896 /* in blocks */ | |
2834 | #define BXT_DDB_SIZE 512 | |
2835 | ||
2836 | static void | |
2837 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | |
2838 | struct drm_crtc *for_crtc, | |
2839 | const struct intel_wm_config *config, | |
2840 | const struct skl_pipe_wm_parameters *params, | |
2841 | struct skl_ddb_entry *alloc /* out */) | |
2842 | { | |
2843 | struct drm_crtc *crtc; | |
2844 | unsigned int pipe_size, ddb_size; | |
2845 | int nth_active_pipe; | |
2846 | ||
2847 | if (!params->active) { | |
2848 | alloc->start = 0; | |
2849 | alloc->end = 0; | |
2850 | return; | |
2851 | } | |
2852 | ||
2853 | if (IS_BROXTON(dev)) | |
2854 | ddb_size = BXT_DDB_SIZE; | |
2855 | else | |
2856 | ddb_size = SKL_DDB_SIZE; | |
2857 | ||
2858 | ddb_size -= 4; /* 4 blocks for bypass path allocation */ | |
2859 | ||
2860 | nth_active_pipe = 0; | |
2861 | for_each_crtc(dev, crtc) { | |
2862 | if (!to_intel_crtc(crtc)->active) | |
2863 | continue; | |
2864 | ||
2865 | if (crtc == for_crtc) | |
2866 | break; | |
2867 | ||
2868 | nth_active_pipe++; | |
2869 | } | |
2870 | ||
2871 | pipe_size = ddb_size / config->num_pipes_active; | |
2872 | alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; | |
2873 | alloc->end = alloc->start + pipe_size; | |
2874 | } | |
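/*
 * Editor's note: a worked example (not driver code) of the even split
 * above, with assumed numbers: SKL (896 DDB blocks), 4 blocks reserved
 * for the bypass path, two active pipes.
 */
static void example_skl_ddb_split(void)
{
	unsigned int ddb_size = 896 - 4;			/* 892 usable blocks */
	unsigned int num_active = 2;
	unsigned int pipe_size = ddb_size / num_active;		/* 446 */

	/* the first active pipe gets [0, 446), the second [446, 892) */
	unsigned int second_start = 1 * ddb_size / num_active;	/* 446 */
	unsigned int second_end = second_start + pipe_size;	/* 892 */

	(void)second_end;
}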
2875 | ||
2876 | static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) | |
2877 | { | |
2878 | if (config->num_pipes_active == 1) | |
2879 | return 32; | |
2880 | ||
2881 | return 8; | |
2882 | } | |
2883 | ||
2884 | static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) | |
2885 | { | |
2886 | entry->start = reg & 0x3ff; | |
2887 | entry->end = (reg >> 16) & 0x3ff; | |
2888 | if (entry->end) | |
2889 | entry->end += 1; | |
2890 | } | |
2891 | ||
2892 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | |
2893 | struct skl_ddb_allocation *ddb /* out */) | |
2894 | { | |
2895 | enum pipe pipe; | |
2896 | int plane; | |
2897 | u32 val; | |
2898 | ||
2899 | for_each_pipe(dev_priv, pipe) { | |
2900 | for_each_plane(dev_priv, pipe, plane) { | |
2901 | val = I915_READ(PLANE_BUF_CFG(pipe, plane)); | |
2902 | skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], | |
2903 | val); | |
2904 | } | |
2905 | ||
2906 | val = I915_READ(CUR_BUF_CFG(pipe)); | |
2907 | skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val); | |
2908 | } | |
2909 | } | |
2910 | ||
2911 | static unsigned int | |
2912 | skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) | |
2913 | { | |
2914 | ||
2915 | /* for planar format */ | |
2916 | if (p->y_bytes_per_pixel) { | |
2917 | if (y) /* y-plane data rate */ | |
2918 | return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel; | |
2919 | else /* uv-plane data rate */ | |
2920 | return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel; | |
2921 | } | |
2922 | ||
2923 | /* for packed formats */ | |
2924 | return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; | |
2925 | } | |
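/*
 * Editor's note: illustrative rates (not driver code) for an assumed
 * 1920x1080 plane. NV12-style planar formats are rated twice - once for
 * the Y surface and once for the half-resolution CbCr surface - while a
 * packed 32bpp format contributes a single term.
 */
static void example_skl_data_rate(void)
{
	unsigned int y_rate = 1920 * 1080 * 1;			/* 2073600 */
	unsigned int uv_rate = (1920 / 2) * (1080 / 2) * 2;	/* 1036800 */
	unsigned int xrgb_rate = 1920 * 1080 * 4;		/* 8294400 */

	(void)y_rate;
	(void)uv_rate;
	(void)xrgb_rate;
}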
2926 | ||
2927 | /* | |
2928 | * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching | |
2929 | * an 8192x4096@32bpp framebuffer: | |
2930 | * 3 * 4096 * 8192 * 4 < 2^32 | |
2931 | */ | |
2932 | static unsigned int | |
2933 | skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, | |
2934 | const struct skl_pipe_wm_parameters *params) | |
2935 | { | |
2936 | unsigned int total_data_rate = 0; | |
2937 | int plane; | |
2938 | ||
2939 | for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { | |
2940 | const struct intel_plane_wm_parameters *p; | |
2941 | ||
2942 | p = ¶ms->plane[plane]; | |
2943 | if (!p->enabled) | |
2944 | continue; | |
2945 | ||
2946 | total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */ | |
2947 | if (p->y_bytes_per_pixel) { | |
2948 | total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */ | |
2949 | } | |
2950 | } | |
2951 | ||
2952 | return total_data_rate; | |
2953 | } | |
2954 | ||
2955 | static void | |
2956 | skl_allocate_pipe_ddb(struct drm_crtc *crtc, | |
2957 | const struct intel_wm_config *config, | |
2958 | const struct skl_pipe_wm_parameters *params, | |
2959 | struct skl_ddb_allocation *ddb /* out */) | |
2960 | { | |
2961 | struct drm_device *dev = crtc->dev; | |
2962 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2963 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2964 | enum pipe pipe = intel_crtc->pipe; | |
2965 | struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; | |
2966 | uint16_t alloc_size, start, cursor_blocks; | |
2967 | uint16_t minimum[I915_MAX_PLANES]; | |
2968 | uint16_t y_minimum[I915_MAX_PLANES]; | |
2969 | unsigned int total_data_rate; | |
2970 | int plane; | |
2971 | ||
2972 | skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); | |
2973 | alloc_size = skl_ddb_entry_size(alloc); | |
2974 | if (alloc_size == 0) { | |
2975 | memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); | |
2976 | memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe])); | |
2977 | return; | |
2978 | } | |
2979 | ||
2980 | cursor_blocks = skl_cursor_allocation(config); | |
2981 | ddb->cursor[pipe].start = alloc->end - cursor_blocks; | |
2982 | ddb->cursor[pipe].end = alloc->end; | |
2983 | ||
2984 | alloc_size -= cursor_blocks; | |
2985 | alloc->end -= cursor_blocks; | |
2986 | ||
2987 | /* 1. Allocate the minimum required blocks for each active plane */ | |
2988 | for_each_plane(dev_priv, pipe, plane) { | |
2989 | const struct intel_plane_wm_parameters *p; | |
2990 | ||
2991 | p = ¶ms->plane[plane]; | |
2992 | if (!p->enabled) | |
2993 | continue; | |
2994 | ||
2995 | minimum[plane] = 8; | |
2996 | alloc_size -= minimum[plane]; | |
2997 | y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0; | |
2998 | alloc_size -= y_minimum[plane]; | |
2999 | } | |
3000 | ||
3001 | /* | |
3002 | * 2. Distribute the remaining space in proportion to the amount of | |
3003 | * data each plane needs to fetch from memory. | |
3004 | * | |
3005 | * FIXME: we may not allocate every single block here. | |
3006 | */ | |
3007 | total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); | |
3008 | ||
3009 | start = alloc->start; | |
3010 | for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { | |
3011 | const struct intel_plane_wm_parameters *p; | |
3012 | unsigned int data_rate, y_data_rate; | |
3013 | uint16_t plane_blocks, y_plane_blocks = 0; | |
3014 | ||
3015 | p = ¶ms->plane[plane]; | |
3016 | if (!p->enabled) | |
3017 | continue; | |
3018 | ||
3019 | data_rate = skl_plane_relative_data_rate(p, 0); | |
3020 | ||
3021 | /* | |
3022 | * allocation for (packed formats) or (uv-plane part of planar format): | |
3023 | * promote the expression to 64 bits to avoid overflowing; the | |
3024 | * result is below alloc_size since data_rate / total_data_rate < 1 | |
3025 | */ | |
3026 | plane_blocks = minimum[plane]; | |
3027 | plane_blocks += div_u64((uint64_t)alloc_size * data_rate, | |
3028 | total_data_rate); | |
3029 | ||
3030 | ddb->plane[pipe][plane].start = start; | |
3031 | ddb->plane[pipe][plane].end = start + plane_blocks; | |
3032 | ||
3033 | start += plane_blocks; | |
3034 | ||
3035 | /* | |
3036 | * allocation for y_plane part of planar format: | |
3037 | */ | |
3038 | if (p->y_bytes_per_pixel) { | |
3039 | y_data_rate = skl_plane_relative_data_rate(p, 1); | |
3040 | y_plane_blocks = y_minimum[plane]; | |
3041 | y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, | |
3042 | total_data_rate); | |
3043 | ||
3044 | ddb->y_plane[pipe][plane].start = start; | |
3045 | ddb->y_plane[pipe][plane].end = start + y_plane_blocks; | |
3046 | ||
3047 | start += y_plane_blocks; | |
3048 | } | |
3049 | ||
3050 | } | |
3051 | ||
3052 | } | |
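/*
 * A worked example of the split above, with hypothetical numbers: suppose
 * the pipe receives a 512-block DDB allocation, the cursor takes 32 blocks
 * and two packed-format planes are enabled.  After reserving the 8-block
 * minimum per plane, alloc_size = 512 - 32 - 2 * 8 = 464 blocks.  If plane 0
 * fetches three times as much data as plane 1 (relative data rates 3:1),
 * the proportional distribution gives:
 *
 *	plane 0: 8 + 464 * 3 / 4 = 356 blocks
 *	plane 1: 8 + 464 * 1 / 4 = 124 blocks
 *
 * Because div_u64() truncates, a few blocks can remain unallocated in the
 * general case, which is what the FIXME above refers to.
 */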
3053 | ||
3054 | static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) | |
3055 | { | |
3056 | /* TODO: Take into account the scalers once we support them */ | |
3057 | return config->base.adjusted_mode.crtc_clock; | |
3058 | } | |
3059 | ||
3060 | /* | |
3061 | * The max latency should be 257 (max the punit can code is 255 and we add 2us | |
3062 | * for the read latency) and bytes_per_pixel should always be <= 8, so that | |
3063 | * should allow pixel_rate up to ~2 GHz which seems sufficient since max | |
3064 | * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. | |
3065 | */ | |
3066 | static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, | |
3067 | uint32_t latency) | |
3068 | { | |
3069 | uint32_t wm_intermediate_val, ret; | |
3070 | ||
3071 | if (latency == 0) | |
3072 | return UINT_MAX; | |
3073 | ||
3074 | wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512; | |
3075 | ret = DIV_ROUND_UP(wm_intermediate_val, 1000); | |
3076 | ||
3077 | return ret; | |
3078 | } | |
3079 | ||
3080 | static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, | |
3081 | uint32_t horiz_pixels, uint8_t bytes_per_pixel, | |
3082 | uint64_t tiling, uint32_t latency) | |
3083 | { | |
3084 | uint32_t ret; | |
3085 | uint32_t plane_bytes_per_line, plane_blocks_per_line; | |
3086 | uint32_t wm_intermediate_val; | |
3087 | ||
3088 | if (latency == 0) | |
3089 | return UINT_MAX; | |
3090 | ||
3091 | plane_bytes_per_line = horiz_pixels * bytes_per_pixel; | |
3092 | ||
3093 | if (tiling == I915_FORMAT_MOD_Y_TILED || | |
3094 | tiling == I915_FORMAT_MOD_Yf_TILED) { | |
3095 | plane_bytes_per_line *= 4; | |
3096 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | |
3097 | plane_blocks_per_line /= 4; | |
3098 | } else { | |
3099 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | |
3100 | } | |
3101 | ||
3102 | wm_intermediate_val = latency * pixel_rate; | |
3103 | ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * | |
3104 | plane_blocks_per_line; | |
3105 | ||
3106 | return ret; | |
3107 | } | |
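/*
 * A worked example for skl_wm_method1(), with hypothetical values: at a
 * 148500 kHz pixel rate, 4 bytes per pixel and a 4 us latency level,
 *
 *	wm_intermediate_val = 4 * 148500 * 4 / 512 = 4640
 *	result              = DIV_ROUND_UP(4640, 1000) = 5 blocks
 *
 * i.e. the bytes fetched during the latency window divided into 512-byte
 * blocks, with the factor of 1000 reconciling the kHz pixel rate with the
 * microsecond latency.
 */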
3108 | ||
3109 | static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, | |
3110 | const struct intel_crtc *intel_crtc) | |
3111 | { | |
3112 | struct drm_device *dev = intel_crtc->base.dev; | |
3113 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3114 | const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; | |
3115 | enum pipe pipe = intel_crtc->pipe; | |
3116 | ||
3117 | if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe], | |
3118 | sizeof(new_ddb->plane[pipe]))) | |
3119 | return true; | |
3120 | ||
3121 | if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe], | |
3122 | sizeof(new_ddb->cursor[pipe]))) | |
3123 | return true; | |
3124 | ||
3125 | return false; | |
3126 | } | |
3127 | ||
3128 | static void skl_compute_wm_global_parameters(struct drm_device *dev, | |
3129 | struct intel_wm_config *config) | |
3130 | { | |
3131 | struct drm_crtc *crtc; | |
3132 | struct drm_plane *plane; | |
3133 | ||
3134 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | |
3135 | config->num_pipes_active += to_intel_crtc(crtc)->active; | |
3136 | ||
3137 | /* FIXME: I don't think we need those two global parameters on SKL */ | |
3138 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | |
3139 | struct intel_plane *intel_plane = to_intel_plane(plane); | |
3140 | ||
3141 | config->sprites_enabled |= intel_plane->wm.enabled; | |
3142 | config->sprites_scaled |= intel_plane->wm.scaled; | |
3143 | } | |
3144 | } | |
3145 | ||
3146 | static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, | |
3147 | struct skl_pipe_wm_parameters *p) | |
3148 | { | |
3149 | struct drm_device *dev = crtc->dev; | |
3150 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3151 | enum pipe pipe = intel_crtc->pipe; | |
3152 | struct drm_plane *plane; | |
3153 | struct drm_framebuffer *fb; | |
3154 | int i = 1; /* index at which sprite plane parameters start */ | |
3155 | ||
3156 | p->active = intel_crtc->active; | |
3157 | if (p->active) { | |
3158 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; | |
3159 | p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); | |
3160 | ||
3161 | fb = crtc->primary->state->fb; | |
3162 | /* For planar: Bpp is for uv plane, y_Bpp is for y plane */ | |
3163 | if (fb) { | |
3164 | p->plane[0].enabled = true; | |
3165 | p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? | |
3166 | drm_format_plane_cpp(fb->pixel_format, 1) : | |
3167 | drm_format_plane_cpp(fb->pixel_format, 0); | |
3168 | p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? | |
3169 | drm_format_plane_cpp(fb->pixel_format, 0) : 0; | |
3170 | p->plane[0].tiling = fb->modifier[0]; | |
3171 | } else { | |
3172 | p->plane[0].enabled = false; | |
3173 | p->plane[0].bytes_per_pixel = 0; | |
3174 | p->plane[0].y_bytes_per_pixel = 0; | |
3175 | p->plane[0].tiling = DRM_FORMAT_MOD_NONE; | |
3176 | } | |
3177 | p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; | |
3178 | p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; | |
3179 | p->plane[0].rotation = crtc->primary->state->rotation; | |
3180 | ||
3181 | fb = crtc->cursor->state->fb; | |
3182 | p->cursor.y_bytes_per_pixel = 0; | |
3183 | if (fb) { | |
3184 | p->cursor.enabled = true; | |
3185 | p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; | |
3186 | p->cursor.horiz_pixels = crtc->cursor->state->crtc_w; | |
3187 | p->cursor.vert_pixels = crtc->cursor->state->crtc_h; | |
3188 | } else { | |
3189 | p->cursor.enabled = false; | |
3190 | p->cursor.bytes_per_pixel = 0; | |
3191 | p->cursor.horiz_pixels = 64; | |
3192 | p->cursor.vert_pixels = 64; | |
3193 | } | |
3194 | } | |
3195 | ||
3196 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | |
3197 | struct intel_plane *intel_plane = to_intel_plane(plane); | |
3198 | ||
3199 | if (intel_plane->pipe == pipe && | |
3200 | plane->type == DRM_PLANE_TYPE_OVERLAY) | |
3201 | p->plane[i++] = intel_plane->wm; | |
3202 | } | |
3203 | } | |
3204 | ||
3205 | static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |
3206 | struct skl_pipe_wm_parameters *p, | |
3207 | struct intel_plane_wm_parameters *p_params, | |
3208 | uint16_t ddb_allocation, | |
3209 | int level, | |
3210 | uint16_t *out_blocks, /* out */ | |
3211 | uint8_t *out_lines /* out */) | |
3212 | { | |
3213 | uint32_t latency = dev_priv->wm.skl_latency[level]; | |
3214 | uint32_t method1, method2; | |
3215 | uint32_t plane_bytes_per_line, plane_blocks_per_line; | |
3216 | uint32_t res_blocks, res_lines; | |
3217 | uint32_t selected_result; | |
3218 | uint8_t bytes_per_pixel; | |
3219 | ||
3220 | if (latency == 0 || !p->active || !p_params->enabled) | |
3221 | return false; | |
3222 | ||
3223 | bytes_per_pixel = p_params->y_bytes_per_pixel ? | |
3224 | p_params->y_bytes_per_pixel : | |
3225 | p_params->bytes_per_pixel; | |
3226 | method1 = skl_wm_method1(p->pixel_rate, | |
3227 | bytes_per_pixel, | |
3228 | latency); | |
3229 | method2 = skl_wm_method2(p->pixel_rate, | |
3230 | p->pipe_htotal, | |
3231 | p_params->horiz_pixels, | |
3232 | bytes_per_pixel, | |
3233 | p_params->tiling, | |
3234 | latency); | |
3235 | ||
3236 | plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel; | |
3237 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | |
3238 | ||
3239 | if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || | |
3240 | p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { | |
3241 | uint32_t min_scanlines = 4; | |
3242 | uint32_t y_tile_minimum; | |
3243 | if (intel_rotation_90_or_270(p_params->rotation)) { | |
3244 | switch (p_params->bytes_per_pixel) { | |
3245 | case 1: | |
3246 | min_scanlines = 16; | |
3247 | break; | |
3248 | case 2: | |
3249 | min_scanlines = 8; | |
3250 | break; | |
3251 | case 8: | |
3252 | WARN(1, "Unsupported pixel depth for rotation"); | |
3253 | } | |
3254 | } | |
3255 | y_tile_minimum = plane_blocks_per_line * min_scanlines; | |
3256 | selected_result = max(method2, y_tile_minimum); | |
3257 | } else { | |
3258 | if ((ddb_allocation / plane_blocks_per_line) >= 1) | |
3259 | selected_result = min(method1, method2); | |
3260 | else | |
3261 | selected_result = method1; | |
3262 | } | |
3263 | ||
3264 | res_blocks = selected_result + 1; | |
3265 | res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); | |
3266 | ||
3267 | if (level >= 1 && level <= 7) { | |
3268 | if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || | |
3269 | p_params->tiling == I915_FORMAT_MOD_Yf_TILED) | |
3270 | res_lines += 4; | |
3271 | else | |
3272 | res_blocks++; | |
3273 | } | |
3274 | ||
3275 | if (res_blocks >= ddb_allocation || res_lines > 31) | |
3276 | return false; | |
3277 | ||
3278 | *out_blocks = res_blocks; | |
3279 | *out_lines = res_lines; | |
3280 | ||
3281 | return true; | |
3282 | } | |
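/*
 * Continuing the hypothetical numbers above for a linear (non-Y-tiled)
 * plane: a 1920-pixel-wide plane at 4 bytes per pixel gives
 * plane_bytes_per_line = 7680 and plane_blocks_per_line = 15.  As long as
 * the plane's DDB allocation can hold at least one such line
 * (ddb_allocation / plane_blocks_per_line >= 1), the smaller of method1
 * and method2 is selected; otherwise the latency-based method1 result is
 * used on its own.
 */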
3283 | ||
3284 | static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, | |
3285 | struct skl_ddb_allocation *ddb, | |
3286 | struct skl_pipe_wm_parameters *p, | |
3287 | enum pipe pipe, | |
3288 | int level, | |
3289 | int num_planes, | |
3290 | struct skl_wm_level *result) | |
3291 | { | |
3292 | uint16_t ddb_blocks; | |
3293 | int i; | |
3294 | ||
3295 | for (i = 0; i < num_planes; i++) { | |
3296 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); | |
3297 | ||
3298 | result->plane_en[i] = skl_compute_plane_wm(dev_priv, | |
3299 | p, &p->plane[i], | |
3300 | ddb_blocks, | |
3301 | level, | |
3302 | &result->plane_res_b[i], | |
3303 | &result->plane_res_l[i]); | |
3304 | } | |
3305 | ||
3306 | ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]); | |
3307 | result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor, | |
3308 | ddb_blocks, level, | |
3309 | &result->cursor_res_b, | |
3310 | &result->cursor_res_l); | |
3311 | } | |
3312 | ||
3313 | static uint32_t | |
3314 | skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) | |
3315 | { | |
3316 | if (!to_intel_crtc(crtc)->active) | |
3317 | return 0; | |
3318 | ||
3319 | if (WARN_ON(p->pixel_rate == 0)) | |
3320 | return 0; | |
3321 | ||
3322 | return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); | |
3323 | } | |
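/*
 * For illustration, with a hypothetical mode: crtc_htotal = 2200 at a
 * 148500 kHz pixel rate means one scanline takes 2200 / 148500 ~= 14.8 us,
 * so the value computed here is DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119,
 * i.e. the line time expressed in 1/8 us units.
 */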
3324 | ||
3325 | static void skl_compute_transition_wm(struct drm_crtc *crtc, | |
3326 | struct skl_pipe_wm_parameters *params, | |
3327 | struct skl_wm_level *trans_wm /* out */) | |
3328 | { | |
3329 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3330 | int i; | |
3331 | ||
3332 | if (!params->active) | |
3333 | return; | |
3334 | ||
3335 | /* Until we know more, just disable transition WMs */ | |
3336 | for (i = 0; i < intel_num_planes(intel_crtc); i++) | |
3337 | trans_wm->plane_en[i] = false; | |
3338 | trans_wm->cursor_en = false; | |
3339 | } | |
3340 | ||
3341 | static void skl_compute_pipe_wm(struct drm_crtc *crtc, | |
3342 | struct skl_ddb_allocation *ddb, | |
3343 | struct skl_pipe_wm_parameters *params, | |
3344 | struct skl_pipe_wm *pipe_wm) | |
3345 | { | |
3346 | struct drm_device *dev = crtc->dev; | |
3347 | const struct drm_i915_private *dev_priv = dev->dev_private; | |
3348 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3349 | int level, max_level = ilk_wm_max_level(dev); | |
3350 | ||
3351 | for (level = 0; level <= max_level; level++) { | |
3352 | skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, | |
3353 | level, intel_num_planes(intel_crtc), | |
3354 | &pipe_wm->wm[level]); | |
3355 | } | |
3356 | pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); | |
3357 | ||
3358 | skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm); | |
3359 | } | |
3360 | ||
3361 | static void skl_compute_wm_results(struct drm_device *dev, | |
3362 | struct skl_pipe_wm_parameters *p, | |
3363 | struct skl_pipe_wm *p_wm, | |
3364 | struct skl_wm_values *r, | |
3365 | struct intel_crtc *intel_crtc) | |
3366 | { | |
3367 | int level, max_level = ilk_wm_max_level(dev); | |
3368 | enum pipe pipe = intel_crtc->pipe; | |
3369 | uint32_t temp; | |
3370 | int i; | |
3371 | ||
3372 | for (level = 0; level <= max_level; level++) { | |
3373 | for (i = 0; i < intel_num_planes(intel_crtc); i++) { | |
3374 | temp = 0; | |
3375 | ||
3376 | temp |= p_wm->wm[level].plane_res_l[i] << | |
3377 | PLANE_WM_LINES_SHIFT; | |
3378 | temp |= p_wm->wm[level].plane_res_b[i]; | |
3379 | if (p_wm->wm[level].plane_en[i]) | |
3380 | temp |= PLANE_WM_EN; | |
3381 | ||
3382 | r->plane[pipe][i][level] = temp; | |
3383 | } | |
3384 | ||
3385 | temp = 0; | |
3386 | ||
3387 | temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT; | |
3388 | temp |= p_wm->wm[level].cursor_res_b; | |
3389 | ||
3390 | if (p_wm->wm[level].cursor_en) | |
3391 | temp |= PLANE_WM_EN; | |
3392 | ||
3393 | r->cursor[pipe][level] = temp; | |
3394 | ||
3395 | } | |
3396 | ||
3397 | /* transition WMs */ | |
3398 | for (i = 0; i < intel_num_planes(intel_crtc); i++) { | |
3399 | temp = 0; | |
3400 | temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT; | |
3401 | temp |= p_wm->trans_wm.plane_res_b[i]; | |
3402 | if (p_wm->trans_wm.plane_en[i]) | |
3403 | temp |= PLANE_WM_EN; | |
3404 | ||
3405 | r->plane_trans[pipe][i] = temp; | |
3406 | } | |
3407 | ||
3408 | temp = 0; | |
3409 | temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT; | |
3410 | temp |= p_wm->trans_wm.cursor_res_b; | |
3411 | if (p_wm->trans_wm.cursor_en) | |
3412 | temp |= PLANE_WM_EN; | |
3413 | ||
3414 | r->cursor_trans[pipe] = temp; | |
3415 | ||
3416 | r->wm_linetime[pipe] = p_wm->linetime; | |
3417 | } | |
3418 | ||
3419 | static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg, | |
3420 | const struct skl_ddb_entry *entry) | |
3421 | { | |
3422 | if (entry->end) | |
3423 | I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); | |
3424 | else | |
3425 | I915_WRITE(reg, 0); | |
3426 | } | |
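/*
 * A quick sketch of the packing above, with a hypothetical entry: for
 *
 *	struct skl_ddb_entry e = { .start = 0, .end = 160 };
 *
 * the register is written as (160 - 1) << 16 | 0, i.e. the inclusive end
 * block (159) in the upper field and the start block in the lower field,
 * while an empty entry (end == 0) simply clears the register.
 */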
3427 | ||
3428 | static void skl_write_wm_values(struct drm_i915_private *dev_priv, | |
3429 | const struct skl_wm_values *new) | |
3430 | { | |
3431 | struct drm_device *dev = dev_priv->dev; | |
3432 | struct intel_crtc *crtc; | |
3433 | ||
3434 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | |
3435 | int i, level, max_level = ilk_wm_max_level(dev); | |
3436 | enum pipe pipe = crtc->pipe; | |
3437 | ||
3438 | if (!new->dirty[pipe]) | |
3439 | continue; | |
3440 | ||
3441 | I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); | |
3442 | ||
3443 | for (level = 0; level <= max_level; level++) { | |
3444 | for (i = 0; i < intel_num_planes(crtc); i++) | |
3445 | I915_WRITE(PLANE_WM(pipe, i, level), | |
3446 | new->plane[pipe][i][level]); | |
3447 | I915_WRITE(CUR_WM(pipe, level), | |
3448 | new->cursor[pipe][level]); | |
3449 | } | |
3450 | for (i = 0; i < intel_num_planes(crtc); i++) | |
3451 | I915_WRITE(PLANE_WM_TRANS(pipe, i), | |
3452 | new->plane_trans[pipe][i]); | |
3453 | I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]); | |
3454 | ||
3455 | for (i = 0; i < intel_num_planes(crtc); i++) { | |
3456 | skl_ddb_entry_write(dev_priv, | |
3457 | PLANE_BUF_CFG(pipe, i), | |
3458 | &new->ddb.plane[pipe][i]); | |
3459 | skl_ddb_entry_write(dev_priv, | |
3460 | PLANE_NV12_BUF_CFG(pipe, i), | |
3461 | &new->ddb.y_plane[pipe][i]); | |
3462 | } | |
3463 | ||
3464 | skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), | |
3465 | &new->ddb.cursor[pipe]); | |
3466 | } | |
3467 | } | |
3468 | ||
3469 | /* | |
3470 | * When setting up a new DDB allocation arrangement, we need to correctly | |
3471 | * sequence the times at which the new allocations for the pipes are taken into | |
3472 | * account or we'll have pipes fetching from space previously allocated to | |
3473 | * another pipe. | |
3474 | * | |
3475 | * Roughly the sequence looks like: | |
3476 | * 1. re-allocate the pipe(s) with the allocation being reduced and not | |
3477 | * overlapping with a previously lit-up pipe (another way to put it is: | |
3478 | * pipes whose new allocation is strictly included in their old one). | |
3479 | * 2. re-allocate the other pipes that get their allocation reduced | |
3480 | * 3. allocate the pipes having their allocation increased | |
3481 | * | |
3482 | * Steps 1. and 2. are here to take care of the following case: | |
3483 | * - Initially DDB looks like this: | |
3484 | * | B | C | | |
3485 | * - enable pipe A. | |
3486 | * - pipe B has a reduced DDB allocation that overlaps with the old pipe C | |
3487 | * allocation | |
3488 | * | A | B | C | | |
3489 | * | |
3490 | * We need to sequence the re-allocation: C, B, A (and not B, C, A). | |
3491 | */ | |
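/*
 * With hypothetical numbers on a 1024-block DDB: if only B and C are active,
 * B owning [0, 512) and C owning [512, 1024), enabling A changes the targets
 * to A [0, 341), B [341, 682) and C [682, 1024).  C's new range lies entirely
 * inside its old one, so it is flushed in pass 1; B shrinks but its new range
 * overlaps C's old space, so it waits for pass 2; A, which only gains space,
 * is flushed last in pass 3.
 */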
3492 | ||
3493 | static void | |
3494 | skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass) | |
3495 | { | |
3496 | int plane; | |
3497 | ||
3498 | DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); | |
3499 | ||
3500 | for_each_plane(dev_priv, pipe, plane) { | |
3501 | I915_WRITE(PLANE_SURF(pipe, plane), | |
3502 | I915_READ(PLANE_SURF(pipe, plane))); | |
3503 | } | |
3504 | I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); | |
3505 | } | |
3506 | ||
3507 | static bool | |
3508 | skl_ddb_allocation_included(const struct skl_ddb_allocation *old, | |
3509 | const struct skl_ddb_allocation *new, | |
3510 | enum pipe pipe) | |
3511 | { | |
3512 | uint16_t old_size, new_size; | |
3513 | ||
3514 | old_size = skl_ddb_entry_size(&old->pipe[pipe]); | |
3515 | new_size = skl_ddb_entry_size(&new->pipe[pipe]); | |
3516 | ||
3517 | return old_size != new_size && | |
3518 | new->pipe[pipe].start >= old->pipe[pipe].start && | |
3519 | new->pipe[pipe].end <= old->pipe[pipe].end; | |
3520 | } | |
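/*
 * In the hypothetical layout above, pipe C (old [512, 1024), new [682, 1024))
 * passes this test, while pipe B (old [0, 512), new [341, 682)) does not,
 * since its new end falls outside the old range; a pipe whose allocation is
 * unchanged also fails the old_size != new_size check and is left for the
 * later passes.
 */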
3521 | ||
3522 | static void skl_flush_wm_values(struct drm_i915_private *dev_priv, | |
3523 | struct skl_wm_values *new_values) | |
3524 | { | |
3525 | struct drm_device *dev = dev_priv->dev; | |
3526 | struct skl_ddb_allocation *cur_ddb, *new_ddb; | |
3527 | bool reallocated[I915_MAX_PIPES] = {}; | |
3528 | struct intel_crtc *crtc; | |
3529 | enum pipe pipe; | |
3530 | ||
3531 | new_ddb = &new_values->ddb; | |
3532 | cur_ddb = &dev_priv->wm.skl_hw.ddb; | |
3533 | ||
3534 | /* | |
3535 | * First pass: flush the pipes whose new allocation is contained within | |
3536 | * the old space. | |
3537 | * | |
3538 | * We'll wait for the vblank on those pipes to ensure we can safely | |
3539 | * re-allocate the freed space without those pipes still fetching from it. | |
3540 | */ | |
3541 | for_each_intel_crtc(dev, crtc) { | |
3542 | if (!crtc->active) | |
3543 | continue; | |
3544 | ||
3545 | pipe = crtc->pipe; | |
3546 | ||
3547 | if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe)) | |
3548 | continue; | |
3549 | ||
3550 | skl_wm_flush_pipe(dev_priv, pipe, 1); | |
3551 | intel_wait_for_vblank(dev, pipe); | |
3552 | ||
3553 | reallocated[pipe] = true; | |
3554 | } | |
3555 | ||
3556 | ||
3557 | /* | |
3558 | * Second pass: flush the pipes that are having their allocation | |
3559 | * reduced, but overlapping with a previous allocation. | |
3560 | * | |
3561 | * Here as well we need to wait for the vblank to make sure the freed | |
3562 | * space is not used anymore. | |
3563 | */ | |
3564 | for_each_intel_crtc(dev, crtc) { | |
3565 | if (!crtc->active) | |
3566 | continue; | |
3567 | ||
3568 | pipe = crtc->pipe; | |
3569 | ||
3570 | if (reallocated[pipe]) | |
3571 | continue; | |
3572 | ||
3573 | if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) < | |
3574 | skl_ddb_entry_size(&cur_ddb->pipe[pipe])) { | |
3575 | skl_wm_flush_pipe(dev_priv, pipe, 2); | |
3576 | intel_wait_for_vblank(dev, pipe); | |
3577 | reallocated[pipe] = true; | |
3578 | } | |
3579 | } | |
3580 | ||
3581 | /* | |
3582 | * Third pass: flush the pipes that got more space allocated. | |
3583 | * | |
3584 | * We don't need to actively wait for the update here, next vblank | |
3585 | * will just get more DDB space with the correct WM values. | |
3586 | */ | |
3587 | for_each_intel_crtc(dev, crtc) { | |
3588 | if (!crtc->active) | |
3589 | continue; | |
3590 | ||
3591 | pipe = crtc->pipe; | |
3592 | ||
3593 | /* | |
3594 | * At this point, only the pipes that got more space than before are | |
3595 | * left to re-allocate. | |
3596 | */ | |
3597 | if (reallocated[pipe]) | |
3598 | continue; | |
3599 | ||
3600 | skl_wm_flush_pipe(dev_priv, pipe, 3); | |
3601 | } | |
3602 | } | |
3603 | ||
3604 | static bool skl_update_pipe_wm(struct drm_crtc *crtc, | |
3605 | struct skl_pipe_wm_parameters *params, | |
3606 | struct intel_wm_config *config, | |
3607 | struct skl_ddb_allocation *ddb, /* out */ | |
3608 | struct skl_pipe_wm *pipe_wm /* out */) | |
3609 | { | |
3610 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3611 | ||
3612 | skl_compute_wm_pipe_parameters(crtc, params); | |
3613 | skl_allocate_pipe_ddb(crtc, config, params, ddb); | |
3614 | skl_compute_pipe_wm(crtc, ddb, params, pipe_wm); | |
3615 | ||
3616 | if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) | |
3617 | return false; | |
3618 | ||
3619 | intel_crtc->wm.skl_active = *pipe_wm; | |
3620 | ||
3621 | return true; | |
3622 | } | |
3623 | ||
3624 | static void skl_update_other_pipe_wm(struct drm_device *dev, | |
3625 | struct drm_crtc *crtc, | |
3626 | struct intel_wm_config *config, | |
3627 | struct skl_wm_values *r) | |
3628 | { | |
3629 | struct intel_crtc *intel_crtc; | |
3630 | struct intel_crtc *this_crtc = to_intel_crtc(crtc); | |
3631 | ||
3632 | /* | |
3633 | * If the WM update hasn't changed the allocation for this_crtc (the | |
3634 | * crtc we are currently computing the new WM values for), other | |
3635 | * enabled crtcs will keep the same allocation and we don't need to | |
3636 | * recompute anything for them. | |
3637 | */ | |
3638 | if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) | |
3639 | return; | |
3640 | ||
3641 | /* | |
3642 | * Otherwise, because of this_crtc being freshly enabled/disabled, the | |
3643 | * other active pipes need new DDB allocation and WM values. | |
3644 | */ | |
3645 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | |
3646 | base.head) { | |
3647 | struct skl_pipe_wm_parameters params = {}; | |
3648 | struct skl_pipe_wm pipe_wm = {}; | |
3649 | bool wm_changed; | |
3650 | ||
3651 | if (this_crtc->pipe == intel_crtc->pipe) | |
3652 | continue; | |
3653 | ||
3654 | if (!intel_crtc->active) | |
3655 | continue; | |
3656 | ||
3657 | wm_changed = skl_update_pipe_wm(&intel_crtc->base, | |
3658 | ¶ms, config, | |
3659 | &r->ddb, &pipe_wm); | |
3660 | ||
3661 | /* | |
3662 | * If we end up re-computing the other pipe WM values, it's | |
3663 | * because it was really needed, so we expect the WM values to | |
3664 | * be different. | |
3665 | */ | |
3666 | WARN_ON(!wm_changed); | |
3667 | ||
3668 | skl_compute_wm_results(dev, ¶ms, &pipe_wm, r, intel_crtc); | |
3669 | r->dirty[intel_crtc->pipe] = true; | |
3670 | } | |
3671 | } | |
3672 | ||
3673 | static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) | |
3674 | { | |
3675 | watermarks->wm_linetime[pipe] = 0; | |
3676 | memset(watermarks->plane[pipe], 0, | |
3677 | sizeof(uint32_t) * 8 * I915_MAX_PLANES); | |
3678 | memset(watermarks->cursor[pipe], 0, sizeof(uint32_t) * 8); | |
3679 | memset(watermarks->plane_trans[pipe], | |
3680 | 0, sizeof(uint32_t) * I915_MAX_PLANES); | |
3681 | watermarks->cursor_trans[pipe] = 0; | |
3682 | ||
3683 | /* Clear ddb entries for pipe */ | |
3684 | memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); | |
3685 | memset(&watermarks->ddb.plane[pipe], 0, | |
3686 | sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); | |
3687 | memset(&watermarks->ddb.y_plane[pipe], 0, | |
3688 | sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); | |
3689 | memset(&watermarks->ddb.cursor[pipe], 0, sizeof(struct skl_ddb_entry)); | |
3690 | ||
3691 | } | |
3692 | ||
3693 | static void skl_update_wm(struct drm_crtc *crtc) | |
3694 | { | |
3695 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3696 | struct drm_device *dev = crtc->dev; | |
3697 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3698 | struct skl_pipe_wm_parameters params = {}; | |
3699 | struct skl_wm_values *results = &dev_priv->wm.skl_results; | |
3700 | struct skl_pipe_wm pipe_wm = {}; | |
3701 | struct intel_wm_config config = {}; | |
3702 | ||
3703 | ||
3704 | /* Clear all dirty flags */ | |
3705 | memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES); | |
3706 | ||
3707 | skl_clear_wm(results, intel_crtc->pipe); | |
3708 | ||
3709 | skl_compute_wm_global_parameters(dev, &config); | |
3710 | ||
3711 | if (!skl_update_pipe_wm(crtc, ¶ms, &config, | |
3712 | &results->ddb, &pipe_wm)) | |
3713 | return; | |
3714 | ||
3715 | skl_compute_wm_results(dev, ¶ms, &pipe_wm, results, intel_crtc); | |
3716 | results->dirty[intel_crtc->pipe] = true; | |
3717 | ||
3718 | skl_update_other_pipe_wm(dev, crtc, &config, results); | |
3719 | skl_write_wm_values(dev_priv, results); | |
3720 | skl_flush_wm_values(dev_priv, results); | |
3721 | ||
3722 | /* store the new configuration */ | |
3723 | dev_priv->wm.skl_hw = *results; | |
3724 | } | |
3725 | ||
3726 | static void | |
3727 | skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, | |
3728 | uint32_t sprite_width, uint32_t sprite_height, | |
3729 | int pixel_size, bool enabled, bool scaled) | |
3730 | { | |
3731 | struct intel_plane *intel_plane = to_intel_plane(plane); | |
3732 | struct drm_framebuffer *fb = plane->state->fb; | |
3733 | ||
3734 | intel_plane->wm.enabled = enabled; | |
3735 | intel_plane->wm.scaled = scaled; | |
3736 | intel_plane->wm.horiz_pixels = sprite_width; | |
3737 | intel_plane->wm.vert_pixels = sprite_height; | |
3738 | intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE; | |
3739 | ||
3740 | /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */ | |
3741 | intel_plane->wm.bytes_per_pixel = | |
3742 | (fb && fb->pixel_format == DRM_FORMAT_NV12) ? | |
3743 | drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size; | |
3744 | intel_plane->wm.y_bytes_per_pixel = | |
3745 | (fb && fb->pixel_format == DRM_FORMAT_NV12) ? | |
3746 | drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0; | |
3747 | ||
3748 | /* | |
3749 | * Framebuffer can be NULL on plane disable, but it does not | |
3750 | * matter for watermarks if we assume no tiling in that case. | |
3751 | */ | |
3752 | if (fb) | |
3753 | intel_plane->wm.tiling = fb->modifier[0]; | |
3754 | intel_plane->wm.rotation = plane->state->rotation; | |
3755 | ||
3756 | skl_update_wm(crtc); | |
3757 | } | |
3758 | ||
3759 | static void ilk_update_wm(struct drm_crtc *crtc) | |
3760 | { | |
3761 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3762 | struct drm_device *dev = crtc->dev; | |
3763 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3764 | struct ilk_wm_maximums max; | |
3765 | struct ilk_pipe_wm_parameters params = {}; | |
3766 | struct ilk_wm_values results = {}; | |
3767 | enum intel_ddb_partitioning partitioning; | |
3768 | struct intel_pipe_wm pipe_wm = {}; | |
3769 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; | |
3770 | struct intel_wm_config config = {}; | |
3771 | ||
3772 | ilk_compute_wm_parameters(crtc, ¶ms); | |
3773 | ||
3774 | intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm); | |
3775 | ||
3776 | if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) | |
3777 | return; | |
3778 | ||
3779 | intel_crtc->wm.active = pipe_wm; | |
3780 | ||
3781 | ilk_compute_wm_config(dev, &config); | |
3782 | ||
3783 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); | |
3784 | ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); | |
3785 | ||
3786 | /* 5/6 split only in single pipe config on IVB+ */ | |
3787 | if (INTEL_INFO(dev)->gen >= 7 && | |
3788 | config.num_pipes_active == 1 && config.sprites_enabled) { | |
3789 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); | |
3790 | ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); | |
3791 | ||
3792 | best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); | |
3793 | } else { | |
3794 | best_lp_wm = &lp_wm_1_2; | |
3795 | } | |
3796 | ||
3797 | partitioning = (best_lp_wm == &lp_wm_1_2) ? | |
3798 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; | |
3799 | ||
3800 | ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); | |
3801 | ||
3802 | ilk_write_wm_values(dev_priv, &results); | |
3803 | } | |
3804 | ||
3805 | static void | |
3806 | ilk_update_sprite_wm(struct drm_plane *plane, | |
3807 | struct drm_crtc *crtc, | |
3808 | uint32_t sprite_width, uint32_t sprite_height, | |
3809 | int pixel_size, bool enabled, bool scaled) | |
3810 | { | |
3811 | struct drm_device *dev = plane->dev; | |
3812 | struct intel_plane *intel_plane = to_intel_plane(plane); | |
3813 | ||
3814 | intel_plane->wm.enabled = enabled; | |
3815 | intel_plane->wm.scaled = scaled; | |
3816 | intel_plane->wm.horiz_pixels = sprite_width; | |
3817 | intel_plane->wm.vert_pixels = sprite_height; | |
3818 | intel_plane->wm.bytes_per_pixel = pixel_size; | |
3819 | ||
3820 | /* | |
3821 | * IVB workaround: must disable low power watermarks for at least | |
3822 | * one frame before enabling scaling. LP watermarks can be re-enabled | |
3823 | * when scaling is disabled. | |
3824 | * | |
3825 | * WaCxSRDisabledForSpriteScaling:ivb | |
3826 | */ | |
3827 | if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) | |
3828 | intel_wait_for_vblank(dev, intel_plane->pipe); | |
3829 | ||
3830 | ilk_update_wm(crtc); | |
3831 | } | |
3832 | ||
3833 | static void skl_pipe_wm_active_state(uint32_t val, | |
3834 | struct skl_pipe_wm *active, | |
3835 | bool is_transwm, | |
3836 | bool is_cursor, | |
3837 | int i, | |
3838 | int level) | |
3839 | { | |
3840 | bool is_enabled = (val & PLANE_WM_EN) != 0; | |
3841 | ||
3842 | if (!is_transwm) { | |
3843 | if (!is_cursor) { | |
3844 | active->wm[level].plane_en[i] = is_enabled; | |
3845 | active->wm[level].plane_res_b[i] = | |
3846 | val & PLANE_WM_BLOCKS_MASK; | |
3847 | active->wm[level].plane_res_l[i] = | |
3848 | (val >> PLANE_WM_LINES_SHIFT) & | |
3849 | PLANE_WM_LINES_MASK; | |
3850 | } else { | |
3851 | active->wm[level].cursor_en = is_enabled; | |
3852 | active->wm[level].cursor_res_b = | |
3853 | val & PLANE_WM_BLOCKS_MASK; | |
3854 | active->wm[level].cursor_res_l = | |
3855 | (val >> PLANE_WM_LINES_SHIFT) & | |
3856 | PLANE_WM_LINES_MASK; | |
3857 | } | |
3858 | } else { | |
3859 | if (!is_cursor) { | |
3860 | active->trans_wm.plane_en[i] = is_enabled; | |
3861 | active->trans_wm.plane_res_b[i] = | |
3862 | val & PLANE_WM_BLOCKS_MASK; | |
3863 | active->trans_wm.plane_res_l[i] = | |
3864 | (val >> PLANE_WM_LINES_SHIFT) & | |
3865 | PLANE_WM_LINES_MASK; | |
3866 | } else { | |
3867 | active->trans_wm.cursor_en = is_enabled; | |
3868 | active->trans_wm.cursor_res_b = | |
3869 | val & PLANE_WM_BLOCKS_MASK; | |
3870 | active->trans_wm.cursor_res_l = | |
3871 | (val >> PLANE_WM_LINES_SHIFT) & | |
3872 | PLANE_WM_LINES_MASK; | |
3873 | } | |
3874 | } | |
3875 | } | |
3876 | ||
3877 | static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |
3878 | { | |
3879 | struct drm_device *dev = crtc->dev; | |
3880 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3881 | struct skl_wm_values *hw = &dev_priv->wm.skl_hw; | |
3882 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3883 | struct skl_pipe_wm *active = &intel_crtc->wm.skl_active; | |
3884 | enum pipe pipe = intel_crtc->pipe; | |
3885 | int level, i, max_level; | |
3886 | uint32_t temp; | |
3887 | ||
3888 | max_level = ilk_wm_max_level(dev); | |
3889 | ||
3890 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | |
3891 | ||
3892 | for (level = 0; level <= max_level; level++) { | |
3893 | for (i = 0; i < intel_num_planes(intel_crtc); i++) | |
3894 | hw->plane[pipe][i][level] = | |
3895 | I915_READ(PLANE_WM(pipe, i, level)); | |
3896 | hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level)); | |
3897 | } | |
3898 | ||
3899 | for (i = 0; i < intel_num_planes(intel_crtc); i++) | |
3900 | hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); | |
3901 | hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); | |
3902 | ||
3903 | if (!intel_crtc->active) | |
3904 | return; | |
3905 | ||
3906 | hw->dirty[pipe] = true; | |
3907 | ||
3908 | active->linetime = hw->wm_linetime[pipe]; | |
3909 | ||
3910 | for (level = 0; level <= max_level; level++) { | |
3911 | for (i = 0; i < intel_num_planes(intel_crtc); i++) { | |
3912 | temp = hw->plane[pipe][i][level]; | |
3913 | skl_pipe_wm_active_state(temp, active, false, | |
3914 | false, i, level); | |
3915 | } | |
3916 | temp = hw->cursor[pipe][level]; | |
3917 | skl_pipe_wm_active_state(temp, active, false, true, i, level); | |
3918 | } | |
3919 | ||
3920 | for (i = 0; i < intel_num_planes(intel_crtc); i++) { | |
3921 | temp = hw->plane_trans[pipe][i]; | |
3922 | skl_pipe_wm_active_state(temp, active, true, false, i, 0); | |
3923 | } | |
3924 | ||
3925 | temp = hw->cursor_trans[pipe]; | |
3926 | skl_pipe_wm_active_state(temp, active, true, true, i, 0); | |
3927 | } | |
3928 | ||
3929 | void skl_wm_get_hw_state(struct drm_device *dev) | |
3930 | { | |
3931 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3932 | struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; | |
3933 | struct drm_crtc *crtc; | |
3934 | ||
3935 | skl_ddb_get_hw_state(dev_priv, ddb); | |
3936 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | |
3937 | skl_pipe_wm_get_hw_state(crtc); | |
3938 | } | |
3939 | ||
3940 | static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |
3941 | { | |
3942 | struct drm_device *dev = crtc->dev; | |
3943 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3944 | struct ilk_wm_values *hw = &dev_priv->wm.hw; | |
3945 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3946 | struct intel_pipe_wm *active = &intel_crtc->wm.active; | |
3947 | enum pipe pipe = intel_crtc->pipe; | |
3948 | static const unsigned int wm0_pipe_reg[] = { | |
3949 | [PIPE_A] = WM0_PIPEA_ILK, | |
3950 | [PIPE_B] = WM0_PIPEB_ILK, | |
3951 | [PIPE_C] = WM0_PIPEC_IVB, | |
3952 | }; | |
3953 | ||
3954 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); | |
3955 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
3956 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | |
3957 | ||
3958 | active->pipe_enabled = intel_crtc->active; | |
3959 | ||
3960 | if (active->pipe_enabled) { | |
3961 | u32 tmp = hw->wm_pipe[pipe]; | |
3962 | ||
3963 | /* | |
3964 | * For active pipes LP0 watermark is marked as | |
3965 | * enabled, and LP1+ watermarks as disabled since | |
3966 | * we can't really reverse compute them in case | |
3967 | * multiple pipes are active. | |
3968 | */ | |
3969 | active->wm[0].enable = true; | |
3970 | active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; | |
3971 | active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; | |
3972 | active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; | |
3973 | active->linetime = hw->wm_linetime[pipe]; | |
3974 | } else { | |
3975 | int level, max_level = ilk_wm_max_level(dev); | |
3976 | ||
3977 | /* | |
3978 | * For inactive pipes, all watermark levels | |
3979 | * should be marked as enabled but zeroed, | |
3980 | * which is what we'd compute them to. | |
3981 | */ | |
3982 | for (level = 0; level <= max_level; level++) | |
3983 | active->wm[level].enable = true; | |
3984 | } | |
3985 | } | |
3986 | ||
3987 | #define _FW_WM(value, plane) \ | |
3988 | (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) | |
3989 | #define _FW_WM_VLV(value, plane) \ | |
3990 | (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) | |
3991 | ||
3992 | static void vlv_read_wm_values(struct drm_i915_private *dev_priv, | |
3993 | struct vlv_wm_values *wm) | |
3994 | { | |
3995 | enum pipe pipe; | |
3996 | uint32_t tmp; | |
3997 | ||
3998 | for_each_pipe(dev_priv, pipe) { | |
3999 | tmp = I915_READ(VLV_DDL(pipe)); | |
4000 | ||
4001 | wm->ddl[pipe].primary = | |
4002 | (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | |
4003 | wm->ddl[pipe].cursor = | |
4004 | (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | |
4005 | wm->ddl[pipe].sprite[0] = | |
4006 | (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | |
4007 | wm->ddl[pipe].sprite[1] = | |
4008 | (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | |
4009 | } | |
4010 | ||
4011 | tmp = I915_READ(DSPFW1); | |
4012 | wm->sr.plane = _FW_WM(tmp, SR); | |
4013 | wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); | |
4014 | wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); | |
4015 | wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); | |
4016 | ||
4017 | tmp = I915_READ(DSPFW2); | |
4018 | wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); | |
4019 | wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); | |
4020 | wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); | |
4021 | ||
4022 | tmp = I915_READ(DSPFW3); | |
4023 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | |
4024 | ||
4025 | if (IS_CHERRYVIEW(dev_priv)) { | |
4026 | tmp = I915_READ(DSPFW7_CHV); | |
4027 | wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); | |
4028 | wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); | |
4029 | ||
4030 | tmp = I915_READ(DSPFW8_CHV); | |
4031 | wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); | |
4032 | wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); | |
4033 | ||
4034 | tmp = I915_READ(DSPFW9_CHV); | |
4035 | wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); | |
4036 | wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); | |
4037 | ||
4038 | tmp = I915_READ(DSPHOWM); | |
4039 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | |
4040 | wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; | |
4041 | wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; | |
4042 | wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; | |
4043 | wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; | |
4044 | wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | |
4045 | wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; | |
4046 | wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | |
4047 | wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | |
4048 | wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; | |
4049 | } else { | |
4050 | tmp = I915_READ(DSPFW7); | |
4051 | wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); | |
4052 | wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); | |
4053 | ||
4054 | tmp = I915_READ(DSPHOWM); | |
4055 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | |
4056 | wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; | |
4057 | wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | |
4058 | wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; | |
4059 | wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | |
4060 | wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | |
4061 | wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; | |
4062 | } | |
4063 | } | |
4064 | ||
4065 | #undef _FW_WM | |
4066 | #undef _FW_WM_VLV | |
4067 | ||
4068 | void vlv_wm_get_hw_state(struct drm_device *dev) | |
4069 | { | |
4070 | struct drm_i915_private *dev_priv = to_i915(dev); | |
4071 | struct vlv_wm_values *wm = &dev_priv->wm.vlv; | |
4072 | struct intel_plane *plane; | |
4073 | enum pipe pipe; | |
4074 | u32 val; | |
4075 | ||
4076 | vlv_read_wm_values(dev_priv, wm); | |
4077 | ||
4078 | for_each_intel_plane(dev, plane) { | |
4079 | switch (plane->base.type) { | |
4080 | int sprite; | |
4081 | case DRM_PLANE_TYPE_CURSOR: | |
4082 | plane->wm.fifo_size = 63; | |
4083 | break; | |
4084 | case DRM_PLANE_TYPE_PRIMARY: | |
4085 | plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0); | |
4086 | break; | |
4087 | case DRM_PLANE_TYPE_OVERLAY: | |
4088 | sprite = plane->plane; | |
4089 | plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1); | |
4090 | break; | |
4091 | } | |
4092 | } | |
4093 | ||
4094 | wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; | |
4095 | wm->level = VLV_WM_LEVEL_PM2; | |
4096 | ||
4097 | if (IS_CHERRYVIEW(dev_priv)) { | |
4098 | mutex_lock(&dev_priv->rps.hw_lock); | |
4099 | ||
4100 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | |
4101 | if (val & DSP_MAXFIFO_PM5_ENABLE) | |
4102 | wm->level = VLV_WM_LEVEL_PM5; | |
4103 | ||
4104 | /* | |
4105 | * If DDR DVFS is disabled in the BIOS, Punit | |
4106 | * will never ack the request. So if that happens | |
4107 | * assume we don't have to enable/disable DDR DVFS | |
4108 | * dynamically. To test that just set the REQ_ACK | |
4109 | * bit to poke the Punit, but don't change the | |
4110 | * HIGH/LOW bits so that we don't actually change | |
4111 | * the current state. | |
4112 | */ | |
4113 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | |
4114 | val |= FORCE_DDR_FREQ_REQ_ACK; | |
4115 | vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); | |
4116 | ||
4117 | if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & | |
4118 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { | |
4119 | DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " | |
4120 | "assuming DDR DVFS is disabled\n"); | |
4121 | dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; | |
4122 | } else { | |
4123 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | |
4124 | if ((val & FORCE_DDR_HIGH_FREQ) == 0) | |
4125 | wm->level = VLV_WM_LEVEL_DDR_DVFS; | |
4126 | } | |
4127 | ||
4128 | mutex_unlock(&dev_priv->rps.hw_lock); | |
4129 | } | |
4130 | ||
4131 | for_each_pipe(dev_priv, pipe) | |
4132 | DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", | |
4133 | pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, | |
4134 | wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); | |
4135 | ||
4136 | DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", | |
4137 | wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); | |
4138 | } | |
4139 | ||
4140 | void ilk_wm_get_hw_state(struct drm_device *dev) | |
4141 | { | |
4142 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4143 | struct ilk_wm_values *hw = &dev_priv->wm.hw; | |
4144 | struct drm_crtc *crtc; | |
4145 | ||
4146 | for_each_crtc(dev, crtc) | |
4147 | ilk_pipe_wm_get_hw_state(crtc); | |
4148 | ||
4149 | hw->wm_lp[0] = I915_READ(WM1_LP_ILK); | |
4150 | hw->wm_lp[1] = I915_READ(WM2_LP_ILK); | |
4151 | hw->wm_lp[2] = I915_READ(WM3_LP_ILK); | |
4152 | ||
4153 | hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); | |
4154 | if (INTEL_INFO(dev)->gen >= 7) { | |
4155 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); | |
4156 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); | |
4157 | } | |
4158 | ||
4159 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
4160 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? | |
4161 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | |
4162 | else if (IS_IVYBRIDGE(dev)) | |
4163 | hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? | |
4164 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | |
4165 | ||
4166 | hw->enable_fbc_wm = | |
4167 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); | |
4168 | } | |
4169 | ||
4170 | /** | |
4171 | * intel_update_watermarks - update FIFO watermark values based on current modes | |
4172 | * | |
4173 | * Calculate watermark values for the various WM regs based on current mode | |
4174 | * and plane configuration. | |
4175 | * | |
4176 | * There are several cases to deal with here: | |
4177 | * - normal (i.e. non-self-refresh) | |
4178 | * - self-refresh (SR) mode | |
4179 | * - lines are large relative to FIFO size (buffer can hold up to 2) | |
4180 | * - lines are small relative to FIFO size (buffer can hold more than 2 | |
4181 | * lines), so need to account for TLB latency | |
4182 | * | |
4183 | * The normal calculation is: | |
4184 | * watermark = dotclock * bytes per pixel * latency | |
4185 | * where latency is platform & configuration dependent (we assume pessimal | |
4186 | * values here). | |
4187 | * | |
4188 | * The SR calculation is: | |
4189 | * watermark = (trunc(latency/line time)+1) * surface width * | |
4190 | * bytes per pixel | |
4191 | * where | |
4192 | * line time = htotal / dotclock | |
4193 | * surface width = hdisplay for normal plane and 64 for cursor | |
4194 | * and latency is assumed to be high, as above. | |
4195 | * | |
4196 | * The final value programmed to the register should always be rounded up, | |
4197 | * and include an extra 2 entries to account for clock crossings. | |
4198 | * | |
4199 | * We don't use the sprite, so we can ignore that. And on Crestline we have | |
4200 | * to set the non-SR watermarks to 8. | |
4201 | */ | |
4202 | void intel_update_watermarks(struct drm_crtc *crtc) | |
4203 | { | |
4204 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | |
4205 | ||
4206 | if (dev_priv->display.update_wm) | |
4207 | dev_priv->display.update_wm(crtc); | |
4208 | } | |
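/*
 * A worked example of the two calculations described above, with
 * hypothetical numbers: at a 148.5 MHz dotclock, 4 bytes per pixel and an
 * assumed 30 us latency, the normal watermark covers roughly
 * 148.5e6 * 4 * 30e-6 ~= 17820 bytes of FIFO.  For self-refresh with
 * htotal = 2200 the line time is 2200 / 148.5e6 ~= 14.8 us, so
 * trunc(30 / 14.8) + 1 = 3 lines; at 1920 pixels of surface width and
 * 4 bytes per pixel that is 3 * 1920 * 4 = 23040 bytes, which is then
 * rounded up and padded with the two extra entries before programming.
 */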
4209 | ||
4210 | void intel_update_sprite_watermarks(struct drm_plane *plane, | |
4211 | struct drm_crtc *crtc, | |
4212 | uint32_t sprite_width, | |
4213 | uint32_t sprite_height, | |
4214 | int pixel_size, | |
4215 | bool enabled, bool scaled) | |
4216 | { | |
4217 | struct drm_i915_private *dev_priv = plane->dev->dev_private; | |
4218 | ||
4219 | if (dev_priv->display.update_sprite_wm) | |
4220 | dev_priv->display.update_sprite_wm(plane, crtc, | |
4221 | sprite_width, sprite_height, | |
4222 | pixel_size, enabled, scaled); | |
4223 | } | |
4224 | ||
4225 | /** | |
4226 | * Lock protecting IPS related data structures | |
4227 | */ | |
4228 | DEFINE_SPINLOCK(mchdev_lock); | |
4229 | ||
4230 | /* Global for IPS driver to get at the current i915 device. Protected by | |
4231 | * mchdev_lock. */ | |
4232 | static struct drm_i915_private *i915_mch_dev; | |
4233 | ||
4234 | bool ironlake_set_drps(struct drm_device *dev, u8 val) | |
4235 | { | |
4236 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4237 | u16 rgvswctl; | |
4238 | ||
4239 | assert_spin_locked(&mchdev_lock); | |
4240 | ||
4241 | rgvswctl = I915_READ16(MEMSWCTL); | |
4242 | if (rgvswctl & MEMCTL_CMD_STS) { | |
4243 | DRM_DEBUG("gpu busy, RCS change rejected\n"); | |
4244 | return false; /* still busy with another command */ | |
4245 | } | |
4246 | ||
4247 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | |
4248 | (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | |
4249 | I915_WRITE16(MEMSWCTL, rgvswctl); | |
4250 | POSTING_READ16(MEMSWCTL); | |
4251 | ||
4252 | rgvswctl |= MEMCTL_CMD_STS; | |
4253 | I915_WRITE16(MEMSWCTL, rgvswctl); | |
4254 | ||
4255 | return true; | |
4256 | } | |
4257 | ||
4258 | static void ironlake_enable_drps(struct drm_device *dev) | |
4259 | { | |
4260 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4261 | u32 rgvmodectl = I915_READ(MEMMODECTL); | |
4262 | u8 fmax, fmin, fstart, vstart; | |
4263 | ||
4264 | spin_lock_irq(&mchdev_lock); | |
4265 | ||
4266 | /* Enable temp reporting */ | |
4267 | I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); | |
4268 | I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); | |
4269 | ||
4270 | /* 100ms RC evaluation intervals */ | |
4271 | I915_WRITE(RCUPEI, 100000); | |
4272 | I915_WRITE(RCDNEI, 100000); | |
4273 | ||
4274 | /* Set max/min thresholds to 90ms and 80ms respectively */ | |
4275 | I915_WRITE(RCBMAXAVG, 90000); | |
4276 | I915_WRITE(RCBMINAVG, 80000); | |
4277 | ||
4278 | I915_WRITE(MEMIHYST, 1); | |
4279 | ||
4280 | /* Set up min, max, and cur for interrupt handling */ | |
4281 | fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; | |
4282 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | |
4283 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | |
4284 | MEMMODE_FSTART_SHIFT; | |
4285 | ||
4286 | vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >> | |
4287 | PXVFREQ_PX_SHIFT; | |
4288 | ||
4289 | dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ | |
4290 | dev_priv->ips.fstart = fstart; | |
4291 | ||
4292 | dev_priv->ips.max_delay = fstart; | |
4293 | dev_priv->ips.min_delay = fmin; | |
4294 | dev_priv->ips.cur_delay = fstart; | |
4295 | ||
4296 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", | |
4297 | fmax, fmin, fstart); | |
4298 | ||
4299 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | |
4300 | ||
4301 | /* | |
4302 | * Interrupts will be enabled in ironlake_irq_postinstall | |
4303 | */ | |
4304 | ||
4305 | I915_WRITE(VIDSTART, vstart); | |
4306 | POSTING_READ(VIDSTART); | |
4307 | ||
4308 | rgvmodectl |= MEMMODE_SWMODE_EN; | |
4309 | I915_WRITE(MEMMODECTL, rgvmodectl); | |
4310 | ||
4311 | if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) | |
4312 | DRM_ERROR("stuck trying to change perf mode\n"); | |
4313 | mdelay(1); | |
4314 | ||
4315 | ironlake_set_drps(dev, fstart); | |
4316 | ||
4317 | dev_priv->ips.last_count1 = I915_READ(DMIEC) + | |
4318 | I915_READ(DDREC) + I915_READ(CSIEC); | |
4319 | dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); | |
4320 | dev_priv->ips.last_count2 = I915_READ(GFXEC); | |
4321 | dev_priv->ips.last_time2 = ktime_get_raw_ns(); | |
4322 | ||
4323 | spin_unlock_irq(&mchdev_lock); | |
4324 | } | |
4325 | ||
4326 | static void ironlake_disable_drps(struct drm_device *dev) | |
4327 | { | |
4328 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4329 | u16 rgvswctl; | |
4330 | ||
4331 | spin_lock_irq(&mchdev_lock); | |
4332 | ||
4333 | rgvswctl = I915_READ16(MEMSWCTL); | |
4334 | ||
4335 | /* Ack interrupts, disable EFC interrupt */ | |
4336 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); | |
4337 | I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); | |
4338 | I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); | |
4339 | I915_WRITE(DEIIR, DE_PCU_EVENT); | |
4340 | I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); | |
4341 | ||
4342 | /* Go back to the starting frequency */ | |
4343 | ironlake_set_drps(dev, dev_priv->ips.fstart); | |
4344 | mdelay(1); | |
4345 | rgvswctl |= MEMCTL_CMD_STS; | |
4346 | I915_WRITE(MEMSWCTL, rgvswctl); | |
4347 | mdelay(1); | |
4348 | ||
4349 | spin_unlock_irq(&mchdev_lock); | |
4350 | } | |
4351 | ||
4352 | /* There's a funny hw issue where the hw returns all 0 when reading from | |
4353 | * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value | |
4354 | * ourselves, instead of doing an rmw cycle (which might result in us clearing | |
4355 | * all limits and the gpu getting stuck at whatever frequency it currently is at). | |
4356 | */ | |
4357 | static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) | |
4358 | { | |
4359 | u32 limits; | |
4360 | ||
4361 | /* Only set the down limit when we've reached the lowest level to avoid | |
4362 | * getting more interrupts, otherwise leave this clear. This prevents a | |
4363 | * race in the hw when coming out of rc6: There's a tiny window where | |
4364 | * the hw runs at the minimal clock before selecting the desired | |
4365 | * frequency; if the down threshold expires in that window we will not | |
4366 | * receive a down interrupt. */ | |
4367 | if (IS_GEN9(dev_priv->dev)) { | |
4368 | limits = (dev_priv->rps.max_freq_softlimit) << 23; | |
4369 | if (val <= dev_priv->rps.min_freq_softlimit) | |
4370 | limits |= (dev_priv->rps.min_freq_softlimit) << 14; | |
4371 | } else { | |
4372 | limits = dev_priv->rps.max_freq_softlimit << 24; | |
4373 | if (val <= dev_priv->rps.min_freq_softlimit) | |
4374 | limits |= dev_priv->rps.min_freq_softlimit << 16; | |
4375 | } | |
4376 | ||
4377 | return limits; | |
4378 | } | |
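/*
 * As a sketch with hypothetical softlimits on gen9: with
 * max_freq_softlimit = 0x16 and min_freq_softlimit = 0x8, a request at or
 * below the minimum yields limits = (0x16 << 23) | (0x8 << 14), while any
 * higher request leaves the down-limit field clear so the down-threshold
 * interrupts keep arriving until the bottom of the range is reached.
 */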
4379 | ||
4380 | static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |
4381 | { | |
4382 | int new_power; | |
4383 | u32 threshold_up = 0, threshold_down = 0; /* in % */ | |
4384 | u32 ei_up = 0, ei_down = 0; | |
4385 | ||
4386 | new_power = dev_priv->rps.power; | |
4387 | switch (dev_priv->rps.power) { | |
4388 | case LOW_POWER: | |
4389 | if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) | |
4390 | new_power = BETWEEN; | |
4391 | break; | |
4392 | ||
4393 | case BETWEEN: | |
4394 | if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) | |
4395 | new_power = LOW_POWER; | |
4396 | else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) | |
4397 | new_power = HIGH_POWER; | |
4398 | break; | |
4399 | ||
4400 | case HIGH_POWER: | |
4401 | if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) | |
4402 | new_power = BETWEEN; | |
4403 | break; | |
4404 | } | |
4405 | /* Max/min bins are special */ | |
4406 | if (val <= dev_priv->rps.min_freq_softlimit) | |
4407 | new_power = LOW_POWER; | |
4408 | if (val >= dev_priv->rps.max_freq_softlimit) | |
4409 | new_power = HIGH_POWER; | |
4410 | if (new_power == dev_priv->rps.power) | |
4411 | return; | |
4412 | ||
4413 | /* Note the units here are not exactly 1us, but 1280ns. */ | |
4414 | switch (new_power) { | |
4415 | case LOW_POWER: | |
4416 | /* Upclock if more than 95% busy over 16ms */ | |
4417 | ei_up = 16000; | |
4418 | threshold_up = 95; | |
4419 | ||
4420 | /* Downclock if less than 85% busy over 32ms */ | |
4421 | ei_down = 32000; | |
4422 | threshold_down = 85; | |
4423 | break; | |
4424 | ||
4425 | case BETWEEN: | |
4426 | /* Upclock if more than 90% busy over 13ms */ | |
4427 | ei_up = 13000; | |
4428 | threshold_up = 90; | |
4429 | ||
4430 | /* Downclock if less than 75% busy over 32ms */ | |
4431 | ei_down = 32000; | |
4432 | threshold_down = 75; | |
4433 | break; | |
4434 | ||
4435 | case HIGH_POWER: | |
4436 | /* Upclock if more than 85% busy over 10ms */ | |
4437 | ei_up = 10000; | |
4438 | threshold_up = 85; | |
4439 | ||
4440 | /* Downclock if less than 60% busy over 32ms */ | |
4441 | ei_down = 32000; | |
4442 | threshold_down = 60; | |
4443 | break; | |
4444 | } | |
4445 | ||
4446 | I915_WRITE(GEN6_RP_UP_EI, | |
4447 | GT_INTERVAL_FROM_US(dev_priv, ei_up)); | |
4448 | I915_WRITE(GEN6_RP_UP_THRESHOLD, | |
4449 | GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); | |
4450 | ||
4451 | I915_WRITE(GEN6_RP_DOWN_EI, | |
4452 | GT_INTERVAL_FROM_US(dev_priv, ei_down)); | |
4453 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, | |
4454 | GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); | |
4455 | ||
4456 | I915_WRITE(GEN6_RP_CONTROL, | |
4457 | GEN6_RP_MEDIA_TURBO | | |
4458 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | |
4459 | GEN6_RP_MEDIA_IS_GFX | | |
4460 | GEN6_RP_ENABLE | | |
4461 | GEN6_RP_UP_BUSY_AVG | | |
4462 | GEN6_RP_DOWN_IDLE_AVG); | |
4463 | ||
4464 | dev_priv->rps.power = new_power; | |
4465 | dev_priv->rps.up_threshold = threshold_up; | |
4466 | dev_priv->rps.down_threshold = threshold_down; | |
4467 | dev_priv->rps.last_adj = 0; | |
4468 | } | |
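/*
 * The evaluation intervals above are specified in microseconds, but as the
 * note says the hardware counts in ~1280 ns units; assuming that unit, the
 * LOW_POWER up-interval of 16000 us corresponds to roughly
 * 16000 / 1.28 = 12500 hardware ticks, with GT_INTERVAL_FROM_US() doing the
 * device-specific conversion.
 */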
4469 | ||
4470 | static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) | |
4471 | { | |
4472 | u32 mask = 0; | |
4473 | ||
4474 | if (val > dev_priv->rps.min_freq_softlimit) | |
4475 | mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; | |
4476 | if (val < dev_priv->rps.max_freq_softlimit) | |
4477 | mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; | |
4478 | ||
4479 | mask &= dev_priv->pm_rps_events; | |
4480 | ||
4481 | return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); | |
4482 | } | |
4483 | ||
4484 | /* gen6_set_rps is called to update the frequency request, but should also be | |
4485 | * called when the range (min_delay and max_delay) is modified so that we can | |
4486 | * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ | |
4487 | static void gen6_set_rps(struct drm_device *dev, u8 val) | |
4488 | { | |
4489 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4490 | ||
4491 | /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ | |
4492 | if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) | |
4493 | return; | |
4494 | ||
4495 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
4496 | WARN_ON(val > dev_priv->rps.max_freq); | |
4497 | WARN_ON(val < dev_priv->rps.min_freq); | |
4498 | ||
4499 | /* min/max delay may still have been modified so be sure to | |
4500 | * write the limits value. | |
4501 | */ | |
4502 | if (val != dev_priv->rps.cur_freq) { | |
4503 | gen6_set_rps_thresholds(dev_priv, val); | |
4504 | ||
4505 | if (IS_GEN9(dev)) | |
4506 | I915_WRITE(GEN6_RPNSWREQ, | |
4507 | GEN9_FREQUENCY(val)); | |
4508 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
4509 | I915_WRITE(GEN6_RPNSWREQ, | |
4510 | HSW_FREQUENCY(val)); | |
4511 | else | |
4512 | I915_WRITE(GEN6_RPNSWREQ, | |
4513 | GEN6_FREQUENCY(val) | | |
4514 | GEN6_OFFSET(0) | | |
4515 | GEN6_AGGRESSIVE_TURBO); | |
4516 | } | |
4517 | ||
4518 | /* Make sure we continue to get interrupts | |
4519 | * until we hit the minimum or maximum frequencies. | |
4520 | */ | |
4521 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); | |
4522 | I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); | |
4523 | ||
4524 | POSTING_READ(GEN6_RPNSWREQ); | |
4525 | ||
4526 | dev_priv->rps.cur_freq = val; | |
4527 | trace_intel_gpu_freq_change(val * 50); | |
4528 | } | |
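/*
 * Unit note as a hypothetical helper: on these gen6-style paths the
 * frequency value is in 50MHz units, which is why the tracepoint above
 * multiplies by 50. Skylake uses 16.66MHz units instead, handled via
 * GEN9_FREQ_SCALER further down.
 */
static inline unsigned int example_gen6_val_to_mhz(u8 val)
{
	return val * 50;	/* e.g. 18 -> 900MHz */
}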
4529 | ||
4530 | static void valleyview_set_rps(struct drm_device *dev, u8 val) | |
4531 | { | |
4532 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4533 | ||
4534 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
4535 | WARN_ON(val > dev_priv->rps.max_freq); | |
4536 | WARN_ON(val < dev_priv->rps.min_freq); | |
4537 | ||
4538 | if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), | |
4539 | "Odd GPU freq value\n")) | |
4540 | val &= ~1; | |
4541 | ||
4542 | I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); | |
4543 | ||
4544 | if (val != dev_priv->rps.cur_freq) { | |
4545 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); | |
4546 | if (!IS_CHERRYVIEW(dev_priv)) | |
4547 | gen6_set_rps_thresholds(dev_priv, val); | |
4548 | } | |
4549 | ||
4550 | dev_priv->rps.cur_freq = val; | |
4551 | trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); | |
4552 | } | |
4553 | ||
4554 | /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down | |
4555 | * | |
4556 | * If Gfx is idle, then: | |
4557 | * 1. Forcewake Media well. | |
4558 | * 2. Request idle freq. | |
4559 | * 3. Release Forcewake of Media well. | |
4560 | */ | |
4561 | static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |
4562 | { | |
4563 | u32 val = dev_priv->rps.idle_freq; | |
4564 | ||
4565 | if (dev_priv->rps.cur_freq <= val) | |
4566 | return; | |
4567 | ||
4568 | /* Wake up the media well, as that takes a lot less | |
4569 | * power than the Render well. */ | |
4570 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); | |
4571 | valleyview_set_rps(dev_priv->dev, val); | |
4572 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); | |
4573 | } | |
4574 | ||
4575 | void gen6_rps_busy(struct drm_i915_private *dev_priv) | |
4576 | { | |
4577 | mutex_lock(&dev_priv->rps.hw_lock); | |
4578 | if (dev_priv->rps.enabled) { | |
4579 | if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) | |
4580 | gen6_rps_reset_ei(dev_priv); | |
4581 | I915_WRITE(GEN6_PMINTRMSK, | |
4582 | gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); | |
4583 | } | |
4584 | mutex_unlock(&dev_priv->rps.hw_lock); | |
4585 | } | |
4586 | ||
4587 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | |
4588 | { | |
4589 | struct drm_device *dev = dev_priv->dev; | |
4590 | ||
4591 | mutex_lock(&dev_priv->rps.hw_lock); | |
4592 | if (dev_priv->rps.enabled) { | |
4593 | if (IS_VALLEYVIEW(dev)) | |
4594 | vlv_set_rps_idle(dev_priv); | |
4595 | else | |
4596 | gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); | |
4597 | dev_priv->rps.last_adj = 0; | |
4598 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | |
4599 | } | |
4600 | mutex_unlock(&dev_priv->rps.hw_lock); | |
4601 | ||
4602 | spin_lock(&dev_priv->rps.client_lock); | |
4603 | while (!list_empty(&dev_priv->rps.clients)) | |
4604 | list_del_init(dev_priv->rps.clients.next); | |
4605 | spin_unlock(&dev_priv->rps.client_lock); | |
4606 | } | |
4607 | ||
4608 | void gen6_rps_boost(struct drm_i915_private *dev_priv, | |
4609 | struct intel_rps_client *rps, | |
4610 | unsigned long submitted) | |
4611 | { | |
4612 | /* This is intentionally racy! We peek at the state here, then | |
4613 | * validate inside the RPS worker. | |
4614 | */ | |
4615 | if (!(dev_priv->mm.busy && | |
4616 | dev_priv->rps.enabled && | |
4617 | dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) | |
4618 | return; | |
4619 | ||
4620 | /* Force an RPS boost (and don't count it against the client) if | |
4621 | * the GPU is severely congested. | |
4622 | */ | |
4623 | if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) | |
4624 | rps = NULL; | |
4625 | ||
4626 | spin_lock(&dev_priv->rps.client_lock); | |
4627 | if (rps == NULL || list_empty(&rps->link)) { | |
4628 | spin_lock_irq(&dev_priv->irq_lock); | |
4629 | if (dev_priv->rps.interrupts_enabled) { | |
4630 | dev_priv->rps.client_boost = true; | |
4631 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
4632 | } | |
4633 | spin_unlock_irq(&dev_priv->irq_lock); | |
4634 | ||
4635 | if (rps != NULL) { | |
4636 | list_add(&rps->link, &dev_priv->rps.clients); | |
4637 | rps->boosts++; | |
4638 | } else | |
4639 | dev_priv->rps.boosts++; | |
4640 | } | |
4641 | spin_unlock(&dev_priv->rps.client_lock); | |
4642 | } | |
4643 | ||
4644 | void intel_set_rps(struct drm_device *dev, u8 val) | |
4645 | { | |
4646 | if (IS_VALLEYVIEW(dev)) | |
4647 | valleyview_set_rps(dev, val); | |
4648 | else | |
4649 | gen6_set_rps(dev, val); | |
4650 | } | |
4651 | ||
4652 | static void gen9_disable_rps(struct drm_device *dev) | |
4653 | { | |
4654 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4655 | ||
4656 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
4657 | I915_WRITE(GEN9_PG_ENABLE, 0); | |
4658 | } | |
4659 | ||
4660 | static void gen6_disable_rps(struct drm_device *dev) | |
4661 | { | |
4662 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4663 | ||
4664 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
4665 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | |
4666 | } | |
4667 | ||
4668 | static void cherryview_disable_rps(struct drm_device *dev) | |
4669 | { | |
4670 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4671 | ||
4672 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
4673 | } | |
4674 | ||
4675 | static void valleyview_disable_rps(struct drm_device *dev) | |
4676 | { | |
4677 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4678 | ||
4679 | /* We're doing forcewake before disabling RC6; | |
4680 | * this is what the BIOS expects when going into suspend. */ | |
4681 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
4682 | ||
4683 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
4684 | ||
4685 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
4686 | } | |
4687 | ||
4688 | static void intel_print_rc6_info(struct drm_device *dev, u32 mode) | |
4689 | { | |
4690 | if (IS_VALLEYVIEW(dev)) { | |
4691 | if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) | |
4692 | mode = GEN6_RC_CTL_RC6_ENABLE; | |
4693 | else | |
4694 | mode = 0; | |
4695 | } | |
4696 | if (HAS_RC6p(dev)) | |
4697 | DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", | |
4698 | (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", | |
4699 | (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", | |
4700 | (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); | |
4701 | ||
4702 | else | |
4703 | DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", | |
4704 | (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); | |
4705 | } | |
4706 | ||
4707 | static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) | |
4708 | { | |
4709 | /* No RC6 before Ironlake, and the Ironlake RC6 code has been removed. */ | |
4710 | if (INTEL_INFO(dev)->gen < 6) | |
4711 | return 0; | |
4712 | ||
4713 | /* Respect the kernel parameter if it is set */ | |
4714 | if (enable_rc6 >= 0) { | |
4715 | int mask; | |
4716 | ||
4717 | if (HAS_RC6p(dev)) | |
4718 | mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | | |
4719 | INTEL_RC6pp_ENABLE; | |
4720 | else | |
4721 | mask = INTEL_RC6_ENABLE; | |
4722 | ||
4723 | if ((enable_rc6 & mask) != enable_rc6) | |
4724 | DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", | |
4725 | enable_rc6 & mask, enable_rc6, mask); | |
4726 | ||
4727 | return enable_rc6 & mask; | |
4728 | } | |
4729 | ||
4730 | if (IS_IVYBRIDGE(dev)) | |
4731 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); | |
4732 | ||
4733 | return INTEL_RC6_ENABLE; | |
4734 | } | |
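/*
 * Worked example of the sanitizing above, as a hypothetical helper: a
 * request for RC6|RC6p|RC6pp (7) on a platform without RC6p support is
 * trimmed down to plain INTEL_RC6_ENABLE (1).
 */
static inline int example_sanitize_rc6(int requested, bool has_rc6p)
{
	int mask = has_rc6p ?
		(INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE) :
		INTEL_RC6_ENABLE;

	return requested & mask;
}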
4735 | ||
4736 | int intel_enable_rc6(const struct drm_device *dev) | |
4737 | { | |
4738 | return i915.enable_rc6; | |
4739 | } | |
4740 | ||
4741 | static void gen6_init_rps_frequencies(struct drm_device *dev) | |
4742 | { | |
4743 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4744 | uint32_t rp_state_cap; | |
4745 | u32 ddcc_status = 0; | |
4746 | int ret; | |
4747 | ||
4748 | /* All of these values are in units of 50MHz */ | |
4749 | dev_priv->rps.cur_freq = 0; | |
4750 | /* static values from HW: RP0 > RP1 > RPn (min_freq) */ | |
4751 | if (IS_BROXTON(dev)) { | |
4752 | rp_state_cap = I915_READ(BXT_RP_STATE_CAP); | |
4753 | dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; | |
4754 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; | |
4755 | dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; | |
4756 | } else { | |
4757 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | |
4758 | dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; | |
4759 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; | |
4760 | dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; | |
4761 | } | |
4762 | ||
4763 | /* hw_max = RP0 until we check for overclocking */ | |
4764 | dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; | |
4765 | ||
4766 | dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; | |
4767 | if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) { | |
4768 | ret = sandybridge_pcode_read(dev_priv, | |
4769 | HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, | |
4770 | &ddcc_status); | |
4771 | if (ret == 0) | |
4772 | dev_priv->rps.efficient_freq = | |
4773 | clamp_t(u8, | |
4774 | ((ddcc_status >> 8) & 0xff), | |
4775 | dev_priv->rps.min_freq, | |
4776 | dev_priv->rps.max_freq); | |
4777 | } | |
4778 | ||
4779 | if (IS_SKYLAKE(dev)) { | |
4780 | /* Store the frequency values in 16.66 MHz units, which is | |
4781 | * the natural hardware unit for SKL */ | |
4782 | dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; | |
4783 | dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; | |
4784 | dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; | |
4785 | dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; | |
4786 | dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; | |
4787 | } | |
4788 | ||
4789 | dev_priv->rps.idle_freq = dev_priv->rps.min_freq; | |
4790 | ||
4791 | /* Preserve min/max settings in case of re-init */ | |
4792 | if (dev_priv->rps.max_freq_softlimit == 0) | |
4793 | dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; | |
4794 | ||
4795 | if (dev_priv->rps.min_freq_softlimit == 0) { | |
4796 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | |
4797 | dev_priv->rps.min_freq_softlimit = | |
4798 | max_t(int, dev_priv->rps.efficient_freq, | |
4799 | intel_freq_opcode(dev_priv, 450)); | |
4800 | else | |
4801 | dev_priv->rps.min_freq_softlimit = | |
4802 | dev_priv->rps.min_freq; | |
4803 | } | |
4804 | } | |
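/*
 * Field-layout sketch for the RP_STATE_CAP decode above (non-Broxton
 * ordering), written as a hypothetical helper: RP0 lives in bits 7:0,
 * RP1 in bits 15:8 and RPn (the minimum) in bits 23:16, all in 50MHz
 * units until the SKL rescaling. Broxton swaps the RP0 and RPn fields.
 */
static inline void example_decode_rp_state_cap(u32 cap,
						u8 *rp0, u8 *rp1, u8 *rpn)
{
	*rp0 = (cap >>  0) & 0xff;
	*rp1 = (cap >>  8) & 0xff;
	*rpn = (cap >> 16) & 0xff;
}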
4805 | ||
4806 | /* See the Gen9_GT_PM_Programming_Guide doc for the below */ | |
4807 | static void gen9_enable_rps(struct drm_device *dev) | |
4808 | { | |
4809 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4810 | ||
4811 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
4812 | ||
4813 | gen6_init_rps_frequencies(dev); | |
4814 | ||
4815 | /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ | |
4816 | if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) { | |
4817 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
4818 | return; | |
4819 | } | |
4820 | ||
4821 | /* Program defaults and thresholds for RPS */ | |
4822 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | |
4823 | GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); | |
4824 | ||
4825 | /* 1 second timeout */ | |
4826 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, | |
4827 | GT_INTERVAL_FROM_US(dev_priv, 1000000)); | |
4828 | ||
4829 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); | |
4830 | ||
4831 | /* Leaning on the below call to gen6_set_rps to program/setup the | |
4832 | * Up/Down EI & threshold registers, as well as the RP_CONTROL, | |
4833 | * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ | |
4834 | dev_priv->rps.power = HIGH_POWER; /* force a reset */ | |
4835 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); | |
4836 | ||
4837 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
4838 | } | |
4839 | ||
4840 | static void gen9_enable_rc6(struct drm_device *dev) | |
4841 | { | |
4842 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4843 | struct intel_engine_cs *ring; | |
4844 | uint32_t rc6_mask = 0; | |
4845 | int unused; | |
4846 | ||
4847 | /* 1a: Software RC state - RC0 */ | |
4848 | I915_WRITE(GEN6_RC_STATE, 0); | |
4849 | ||
4850 | /* 1b: Get forcewake during program sequence. Although the driver | |
4851 | * hasn't enabled a state yet where we need forcewake, BIOS may have. */ | |
4852 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
4853 | ||
4854 | /* 2a: Disable RC states. */ | |
4855 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
4856 | ||
4857 | /* 2b: Program RC6 thresholds.*/ | |
4858 | ||
4859 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ | |
4860 | if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && | |
4861 | (INTEL_REVID(dev) <= SKL_REVID_E0))) | |
4862 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); | |
4863 | else | |
4864 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); | |
4865 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ | |
4866 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ | |
4867 | for_each_ring(ring, dev_priv, unused) | |
4868 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | |
4869 | ||
4870 | if (HAS_GUC_UCODE(dev)) | |
4871 | I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); | |
4872 | ||
4873 | I915_WRITE(GEN6_RC_SLEEP, 0); | |
4874 | I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ | |
4875 | ||
4876 | /* 2c: Program Coarse Power Gating Policies. */ | |
4877 | I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); | |
4878 | I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); | |
4879 | ||
4880 | /* 3a: Enable RC6 */ | |
4881 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) | |
4882 | rc6_mask = GEN6_RC_CTL_RC6_ENABLE; | |
4883 | DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? | |
4884 | "on" : "off"); | |
4885 | ||
4886 | if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) || | |
4887 | (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) | |
4888 | I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | | |
4889 | GEN7_RC_CTL_TO_MODE | | |
4890 | rc6_mask); | |
4891 | else | |
4892 | I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | | |
4893 | GEN6_RC_CTL_EI_MODE(1) | | |
4894 | rc6_mask); | |
4895 | ||
4896 | /* | |
4897 | * 3b: Enable Coarse Power Gating only when RC6 is enabled. | |
4898 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. | |
4899 | */ | |
4900 | if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || | |
4901 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) | |
4902 | I915_WRITE(GEN9_PG_ENABLE, 0); | |
4903 | else | |
4904 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? | |
4905 | (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); | |
4906 | ||
4907 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
4908 | ||
4909 | } | |
4910 | ||
4911 | static void gen8_enable_rps(struct drm_device *dev) | |
4912 | { | |
4913 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4914 | struct intel_engine_cs *ring; | |
4915 | uint32_t rc6_mask = 0; | |
4916 | int unused; | |
4917 | ||
4918 | /* 1a: Software RC state - RC0 */ | |
4919 | I915_WRITE(GEN6_RC_STATE, 0); | |
4920 | ||
4921 | /* 1c & 1d: Get forcewake during program sequence. Although the driver | |
4922 | * hasn't enabled a state yet where we need forcewake, BIOS may have. */ | |
4923 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
4924 | ||
4925 | /* 2a: Disable RC states. */ | |
4926 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
4927 | ||
4928 | /* Initialize rps frequencies */ | |
4929 | gen6_init_rps_frequencies(dev); | |
4930 | ||
4931 | /* 2b: Program RC6 thresholds.*/ | |
4932 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); | |
4933 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ | |
4934 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ | |
4935 | for_each_ring(ring, dev_priv, unused) | |
4936 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | |
4937 | I915_WRITE(GEN6_RC_SLEEP, 0); | |
4938 | if (IS_BROADWELL(dev)) | |
4939 | I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ | |
4940 | else | |
4941 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ | |
4942 | ||
4943 | /* 3: Enable RC6 */ | |
4944 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) | |
4945 | rc6_mask = GEN6_RC_CTL_RC6_ENABLE; | |
4946 | intel_print_rc6_info(dev, rc6_mask); | |
4947 | if (IS_BROADWELL(dev)) | |
4948 | I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | | |
4949 | GEN7_RC_CTL_TO_MODE | | |
4950 | rc6_mask); | |
4951 | else | |
4952 | I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | | |
4953 | GEN6_RC_CTL_EI_MODE(1) | | |
4954 | rc6_mask); | |
4955 | ||
4956 | /* 4 Program defaults and thresholds for RPS*/ | |
4957 | I915_WRITE(GEN6_RPNSWREQ, | |
4958 | HSW_FREQUENCY(dev_priv->rps.rp1_freq)); | |
4959 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | |
4960 | HSW_FREQUENCY(dev_priv->rps.rp1_freq)); | |
4961 | /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ | |
4962 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ | |
4963 | ||
4964 | /* Docs recommend 900 MHz and 300 MHz respectively */ | |
4965 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | |
4966 | dev_priv->rps.max_freq_softlimit << 24 | | |
4967 | dev_priv->rps.min_freq_softlimit << 16); | |
4968 | ||
4969 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ | |
4970 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ | |
4971 | I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ | |
4972 | I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ | |
4973 | ||
4974 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | |
4975 | ||
4976 | /* 5: Enable RPS */ | |
4977 | I915_WRITE(GEN6_RP_CONTROL, | |
4978 | GEN6_RP_MEDIA_TURBO | | |
4979 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | |
4980 | GEN6_RP_MEDIA_IS_GFX | | |
4981 | GEN6_RP_ENABLE | | |
4982 | GEN6_RP_UP_BUSY_AVG | | |
4983 | GEN6_RP_DOWN_IDLE_AVG); | |
4984 | ||
4985 | /* 6: Ring frequency + overclocking (our driver does this later) */ | |
4986 | ||
4987 | dev_priv->rps.power = HIGH_POWER; /* force a reset */ | |
4988 | gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); | |
4989 | ||
4990 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
4991 | } | |
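/*
 * Unit sketch for the down-timeout written above, as a hypothetical
 * helper: the register counts 1.28us ticks, so one second is
 * 100000000 / 128 = 781250 ticks, matching the value programmed in
 * gen8_enable_rps().
 */
static inline u32 example_seconds_to_rp_ticks(u32 seconds)
{
	return seconds * (100000000u / 128);
}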
4992 | ||
4993 | static void gen6_enable_rps(struct drm_device *dev) | |
4994 | { | |
4995 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4996 | struct intel_engine_cs *ring; | |
4997 | u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; | |
4998 | u32 gtfifodbg; | |
4999 | int rc6_mode; | |
5000 | int i, ret; | |
5001 | ||
5002 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
5003 | ||
5004 | /* Here begins a magic sequence of register writes to enable | |
5005 | * auto-downclocking. | |
5006 | * | |
5007 | * Perhaps there might be some value in exposing these to | |
5008 | * userspace... | |
5009 | */ | |
5010 | I915_WRITE(GEN6_RC_STATE, 0); | |
5011 | ||
5012 | /* Clear the DBG now so we don't confuse earlier errors */ | |
5013 | if ((gtfifodbg = I915_READ(GTFIFODBG))) { | |
5014 | DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); | |
5015 | I915_WRITE(GTFIFODBG, gtfifodbg); | |
5016 | } | |
5017 | ||
5018 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
5019 | ||
5020 | /* Initialize rps frequencies */ | |
5021 | gen6_init_rps_frequencies(dev); | |
5022 | ||
5023 | /* disable the counters and set deterministic thresholds */ | |
5024 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
5025 | ||
5026 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); | |
5027 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); | |
5028 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); | |
5029 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | |
5030 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | |
5031 | ||
5032 | for_each_ring(ring, dev_priv, i) | |
5033 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | |
5034 | ||
5035 | I915_WRITE(GEN6_RC_SLEEP, 0); | |
5036 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); | |
5037 | if (IS_IVYBRIDGE(dev)) | |
5038 | I915_WRITE(GEN6_RC6_THRESHOLD, 125000); | |
5039 | else | |
5040 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); | |
5041 | I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); | |
5042 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | |
5043 | ||
5044 | /* Check if we are enabling RC6 */ | |
5045 | rc6_mode = intel_enable_rc6(dev_priv->dev); | |
5046 | if (rc6_mode & INTEL_RC6_ENABLE) | |
5047 | rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; | |
5048 | ||
5049 | /* We don't use those on Haswell */ | |
5050 | if (!IS_HASWELL(dev)) { | |
5051 | if (rc6_mode & INTEL_RC6p_ENABLE) | |
5052 | rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; | |
5053 | ||
5054 | if (rc6_mode & INTEL_RC6pp_ENABLE) | |
5055 | rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; | |
5056 | } | |
5057 | ||
5058 | intel_print_rc6_info(dev, rc6_mask); | |
5059 | ||
5060 | I915_WRITE(GEN6_RC_CONTROL, | |
5061 | rc6_mask | | |
5062 | GEN6_RC_CTL_EI_MODE(1) | | |
5063 | GEN6_RC_CTL_HW_ENABLE); | |
5064 | ||
5065 | /* Power down if completely idle for over 50ms */ | |
5066 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); | |
5067 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | |
5068 | ||
5069 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); | |
5070 | if (ret) | |
5071 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); | |
5072 | ||
5073 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); | |
5074 | if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ | |
5075 | DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", | |
5076 | (dev_priv->rps.max_freq_softlimit & 0xff) * 50, | |
5077 | (pcu_mbox & 0xff) * 50); | |
5078 | dev_priv->rps.max_freq = pcu_mbox & 0xff; | |
5079 | } | |
5080 | ||
5081 | dev_priv->rps.power = HIGH_POWER; /* force a reset */ | |
5082 | gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); | |
5083 | ||
5084 | rc6vids = 0; | |
5085 | ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | |
5086 | if (IS_GEN6(dev) && ret) { | |
5087 | DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); | |
5088 | } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { | |
5089 | DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", | |
5090 | GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); | |
5091 | rc6vids &= 0xffff00; | |
5092 | rc6vids |= GEN6_ENCODE_RC6_VID(450); | |
5093 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); | |
5094 | if (ret) | |
5095 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); | |
5096 | } | |
5097 | ||
5098 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
5099 | } | |
5100 | ||
5101 | static void __gen6_update_ring_freq(struct drm_device *dev) | |
5102 | { | |
5103 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5104 | int min_freq = 15; | |
5105 | unsigned int gpu_freq; | |
5106 | unsigned int max_ia_freq, min_ring_freq; | |
5107 | unsigned int max_gpu_freq, min_gpu_freq; | |
5108 | int scaling_factor = 180; | |
5109 | struct cpufreq_policy *policy; | |
5110 | ||
5111 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
5112 | ||
5113 | policy = cpufreq_cpu_get(0); | |
5114 | if (policy) { | |
5115 | max_ia_freq = policy->cpuinfo.max_freq; | |
5116 | cpufreq_cpu_put(policy); | |
5117 | } else { | |
5118 | /* | |
5119 | * Default to measured freq if none found, PCU will ensure we | |
5120 | * don't go over | |
5121 | */ | |
5122 | max_ia_freq = tsc_khz; | |
5123 | } | |
5124 | ||
5125 | /* Convert from kHz to MHz */ | |
5126 | max_ia_freq /= 1000; | |
5127 | ||
5128 | min_ring_freq = I915_READ(DCLK) & 0xf; | |
5129 | /* convert DDR frequency from units of 266.6MHz to bandwidth */ | |
5130 | min_ring_freq = mult_frac(min_ring_freq, 8, 3); | |
5131 | ||
5132 | if (IS_SKYLAKE(dev)) { | |
5133 | /* Convert GT frequency to 50 MHz units */ | |
5134 | min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; | |
5135 | max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; | |
5136 | } else { | |
5137 | min_gpu_freq = dev_priv->rps.min_freq; | |
5138 | max_gpu_freq = dev_priv->rps.max_freq; | |
5139 | } | |
5140 | ||
5141 | /* | |
5142 | * For each potential GPU frequency, load a ring frequency we'd like | |
5143 | * to use for memory access. We do this by specifying the IA frequency | |
5144 | * the PCU should use as a reference to determine the ring frequency. | |
5145 | */ | |
5146 | for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { | |
5147 | int diff = max_gpu_freq - gpu_freq; | |
5148 | unsigned int ia_freq = 0, ring_freq = 0; | |
5149 | ||
5150 | if (IS_SKYLAKE(dev)) { | |
5151 | /* | |
5152 | * ring_freq = 2 * GT. ring_freq is in 100MHz units | |
5153 | * No floor required for ring frequency on SKL. | |
5154 | */ | |
5155 | ring_freq = gpu_freq; | |
5156 | } else if (INTEL_INFO(dev)->gen >= 8) { | |
5157 | /* max(2 * GT, DDR). NB: GT is 50MHz units */ | |
5158 | ring_freq = max(min_ring_freq, gpu_freq); | |
5159 | } else if (IS_HASWELL(dev)) { | |
5160 | ring_freq = mult_frac(gpu_freq, 5, 4); | |
5161 | ring_freq = max(min_ring_freq, ring_freq); | |
5162 | /* leave ia_freq as the default, chosen by cpufreq */ | |
5163 | } else { | |
5164 | /* On older processors, there is no separate ring | |
5165 | * clock domain, so in order to boost the bandwidth | |
5166 | * of the ring, we need to upclock the CPU (ia_freq). | |
5167 | * | |
5168 | * For GPU frequencies less than 750MHz, | |
5169 | * just use the lowest ring freq. | |
5170 | */ | |
5171 | if (gpu_freq < min_freq) | |
5172 | ia_freq = 800; | |
5173 | else | |
5174 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | |
5175 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | |
5176 | } | |
5177 | ||
5178 | sandybridge_pcode_write(dev_priv, | |
5179 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE, | |
5180 | ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | | |
5181 | ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | | |
5182 | gpu_freq); | |
5183 | } | |
5184 | } | |
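/*
 * Sketch of the pre-Haswell branch of the loop above, as a hypothetical
 * helper: the further the GPU frequency sits below its maximum, the
 * lower the IA (CPU) frequency we ask the PCU to pair with it, scaled
 * by scaling_factor and finally expressed in 100MHz units.
 */
static inline unsigned int example_legacy_ia_freq(unsigned int max_ia_freq,
						  int diff, int scaling_factor)
{
	unsigned int ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);

	return DIV_ROUND_CLOSEST(ia_freq, 100);
}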
5185 | ||
5186 | void gen6_update_ring_freq(struct drm_device *dev) | |
5187 | { | |
5188 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5189 | ||
5190 | if (!HAS_CORE_RING_FREQ(dev)) | |
5191 | return; | |
5192 | ||
5193 | mutex_lock(&dev_priv->rps.hw_lock); | |
5194 | __gen6_update_ring_freq(dev); | |
5195 | mutex_unlock(&dev_priv->rps.hw_lock); | |
5196 | } | |
5197 | ||
5198 | static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) | |
5199 | { | |
5200 | struct drm_device *dev = dev_priv->dev; | |
5201 | u32 val, rp0; | |
5202 | ||
5203 | if (dev->pdev->revision >= 0x20) { | |
5204 | val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); | |
5205 | ||
5206 | switch (INTEL_INFO(dev)->eu_total) { | |
5207 | case 8: | |
5208 | /* (2 * 4) config */ | |
5209 | rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); | |
5210 | break; | |
5211 | case 12: | |
5212 | /* (2 * 6) config */ | |
5213 | rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); | |
5214 | break; | |
5215 | case 16: | |
5216 | /* (2 * 8) config */ | |
5217 | default: | |
5218 | /* Setting (2 * 8) Min RP0 for any other combination */ | |
5219 | rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); | |
5220 | break; | |
5221 | } | |
5222 | rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); | |
5223 | } else { | |
5224 | /* For pre-production hardware */ | |
5225 | val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); | |
5226 | rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & | |
5227 | PUNIT_GPU_STATUS_MAX_FREQ_MASK; | |
5228 | } | |
5229 | return rp0; | |
5230 | } | |
5231 | ||
5232 | static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) | |
5233 | { | |
5234 | u32 val, rpe; | |
5235 | ||
5236 | val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); | |
5237 | rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; | |
5238 | ||
5239 | return rpe; | |
5240 | } | |
5241 | ||
5242 | static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) | |
5243 | { | |
5244 | struct drm_device *dev = dev_priv->dev; | |
5245 | u32 val, rp1; | |
5246 | ||
5247 | if (dev->pdev->revision >= 0x20) { | |
5248 | val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); | |
5249 | rp1 = (val & FB_GFX_FREQ_FUSE_MASK); | |
5250 | } else { | |
5251 | /* For pre-production hardware */ | |
5252 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | |
5253 | rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & | |
5254 | PUNIT_GPU_STATUS_MAX_FREQ_MASK); | |
5255 | } | |
5256 | return rp1; | |
5257 | } | |
5258 | ||
5259 | static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) | |
5260 | { | |
5261 | u32 val, rp1; | |
5262 | ||
5263 | val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); | |
5264 | ||
5265 | rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; | |
5266 | ||
5267 | return rp1; | |
5268 | } | |
5269 | ||
5270 | static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) | |
5271 | { | |
5272 | u32 val, rp0; | |
5273 | ||
5274 | val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); | |
5275 | ||
5276 | rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; | |
5277 | /* Clamp to max */ | |
5278 | rp0 = min_t(u32, rp0, 0xea); | |
5279 | ||
5280 | return rp0; | |
5281 | } | |
5282 | ||
5283 | static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) | |
5284 | { | |
5285 | u32 val, rpe; | |
5286 | ||
5287 | val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); | |
5288 | rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; | |
5289 | val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); | |
5290 | rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; | |
5291 | ||
5292 | return rpe; | |
5293 | } | |
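/*
 * Sketch of the fuse stitching above, as a hypothetical helper: the RPe
 * frequency is split across two fuse registers, with the low bits in
 * FMAX_FUSE_LO and the remaining bits in FMAX_FUSE_HI shifted up by 5.
 */
static inline u32 example_combine_rpe_fuse(u32 lo_bits, u32 hi_bits)
{
	return lo_bits | (hi_bits << 5);
}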
5294 | ||
5295 | static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) | |
5296 | { | |
5297 | return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; | |
5298 | } | |
5299 | ||
5300 | /* Check that the pctx buffer wasn't moved under us. */ | |
5301 | static void valleyview_check_pctx(struct drm_i915_private *dev_priv) | |
5302 | { | |
5303 | unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; | |
5304 | ||
5305 | WARN_ON(pctx_addr != dev_priv->mm.stolen_base + | |
5306 | dev_priv->vlv_pctx->stolen->start); | |
5307 | } | |
5308 | ||
5309 | ||
5310 | /* Check that the pcbr address is not empty. */ | |
5311 | static void cherryview_check_pctx(struct drm_i915_private *dev_priv) | |
5312 | { | |
5313 | unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; | |
5314 | ||
5315 | WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); | |
5316 | } | |
5317 | ||
5318 | static void cherryview_setup_pctx(struct drm_device *dev) | |
5319 | { | |
5320 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5321 | unsigned long pctx_paddr, paddr; | |
5322 | struct i915_gtt *gtt = &dev_priv->gtt; | |
5323 | u32 pcbr; | |
5324 | int pctx_size = 32*1024; | |
5325 | ||
5326 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
5327 | ||
5328 | pcbr = I915_READ(VLV_PCBR); | |
5329 | if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { | |
5330 | DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); | |
5331 | paddr = (dev_priv->mm.stolen_base + | |
5332 | (gtt->stolen_size - pctx_size)); | |
5333 | ||
5334 | pctx_paddr = (paddr & (~4095)); | |
5335 | I915_WRITE(VLV_PCBR, pctx_paddr); | |
5336 | } | |
5337 | ||
5338 | DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); | |
5339 | } | |
5340 | ||
5341 | static void valleyview_setup_pctx(struct drm_device *dev) | |
5342 | { | |
5343 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5344 | struct drm_i915_gem_object *pctx; | |
5345 | unsigned long pctx_paddr; | |
5346 | u32 pcbr; | |
5347 | int pctx_size = 24*1024; | |
5348 | ||
5349 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
5350 | ||
5351 | pcbr = I915_READ(VLV_PCBR); | |
5352 | if (pcbr) { | |
5353 | /* BIOS set it up already, grab the pre-alloc'd space */ | |
5354 | int pcbr_offset; | |
5355 | ||
5356 | pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; | |
5357 | pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, | |
5358 | pcbr_offset, | |
5359 | I915_GTT_OFFSET_NONE, | |
5360 | pctx_size); | |
5361 | goto out; | |
5362 | } | |
5363 | ||
5364 | DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); | |
5365 | ||
5366 | /* | |
5367 | * From the Gunit register HAS: | |
5368 | * The Gfx driver is expected to program this register and ensure | |
5369 | * proper allocation within Gfx stolen memory. For example, this | |
5370 | * register should be programmed such that the PCBR range does not | |
5371 | * overlap with other ranges, such as the frame buffer, protected | |
5372 | * memory, or any other relevant ranges. | |
5373 | */ | |
5374 | pctx = i915_gem_object_create_stolen(dev, pctx_size); | |
5375 | if (!pctx) { | |
5376 | DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); | |
5377 | return; | |
5378 | } | |
5379 | ||
5380 | pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; | |
5381 | I915_WRITE(VLV_PCBR, pctx_paddr); | |
5382 | ||
5383 | out: | |
5384 | DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); | |
5385 | dev_priv->vlv_pctx = pctx; | |
5386 | } | |
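/*
 * Sketch of the PCBR programming above, as a hypothetical helper: the
 * power context base address must be 4KiB aligned within stolen memory,
 * which is what the masking with ~4095 before the register write
 * achieves.
 */
static inline unsigned long example_pcbr_align(unsigned long paddr)
{
	return paddr & ~4095UL;		/* clear the low 12 bits */
}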
5387 | ||
5388 | static void valleyview_cleanup_pctx(struct drm_device *dev) | |
5389 | { | |
5390 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5391 | ||
5392 | if (WARN_ON(!dev_priv->vlv_pctx)) | |
5393 | return; | |
5394 | ||
5395 | drm_gem_object_unreference(&dev_priv->vlv_pctx->base); | |
5396 | dev_priv->vlv_pctx = NULL; | |
5397 | } | |
5398 | ||
5399 | static void valleyview_init_gt_powersave(struct drm_device *dev) | |
5400 | { | |
5401 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5402 | u32 val; | |
5403 | ||
5404 | valleyview_setup_pctx(dev); | |
5405 | ||
5406 | mutex_lock(&dev_priv->rps.hw_lock); | |
5407 | ||
5408 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | |
5409 | switch ((val >> 6) & 3) { | |
5410 | case 0: | |
5411 | case 1: | |
5412 | dev_priv->mem_freq = 800; | |
5413 | break; | |
5414 | case 2: | |
5415 | dev_priv->mem_freq = 1066; | |
5416 | break; | |
5417 | case 3: | |
5418 | dev_priv->mem_freq = 1333; | |
5419 | break; | |
5420 | } | |
5421 | DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); | |
5422 | ||
5423 | dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); | |
5424 | dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; | |
5425 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", | |
5426 | intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), | |
5427 | dev_priv->rps.max_freq); | |
5428 | ||
5429 | dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); | |
5430 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", | |
5431 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), | |
5432 | dev_priv->rps.efficient_freq); | |
5433 | ||
5434 | dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); | |
5435 | DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", | |
5436 | intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), | |
5437 | dev_priv->rps.rp1_freq); | |
5438 | ||
5439 | dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); | |
5440 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", | |
5441 | intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), | |
5442 | dev_priv->rps.min_freq); | |
5443 | ||
5444 | dev_priv->rps.idle_freq = dev_priv->rps.min_freq; | |
5445 | ||
5446 | /* Preserve min/max settings in case of re-init */ | |
5447 | if (dev_priv->rps.max_freq_softlimit == 0) | |
5448 | dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; | |
5449 | ||
5450 | if (dev_priv->rps.min_freq_softlimit == 0) | |
5451 | dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; | |
5452 | ||
5453 | mutex_unlock(&dev_priv->rps.hw_lock); | |
5454 | } | |
5455 | ||
5456 | static void cherryview_init_gt_powersave(struct drm_device *dev) | |
5457 | { | |
5458 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5459 | u32 val; | |
5460 | ||
5461 | cherryview_setup_pctx(dev); | |
5462 | ||
5463 | mutex_lock(&dev_priv->rps.hw_lock); | |
5464 | ||
5465 | mutex_lock(&dev_priv->sb_lock); | |
5466 | val = vlv_cck_read(dev_priv, CCK_FUSE_REG); | |
5467 | mutex_unlock(&dev_priv->sb_lock); | |
5468 | ||
5469 | switch ((val >> 2) & 0x7) { | |
5470 | case 3: | |
5471 | dev_priv->mem_freq = 2000; | |
5472 | break; | |
5473 | default: | |
5474 | dev_priv->mem_freq = 1600; | |
5475 | break; | |
5476 | } | |
5477 | DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); | |
5478 | ||
5479 | dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); | |
5480 | dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; | |
5481 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", | |
5482 | intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), | |
5483 | dev_priv->rps.max_freq); | |
5484 | ||
5485 | dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); | |
5486 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", | |
5487 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), | |
5488 | dev_priv->rps.efficient_freq); | |
5489 | ||
5490 | dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); | |
5491 | DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", | |
5492 | intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), | |
5493 | dev_priv->rps.rp1_freq); | |
5494 | ||
5495 | /* PUnit validated range is only [RPe, RP0] */ | |
5496 | dev_priv->rps.min_freq = dev_priv->rps.efficient_freq; | |
5497 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", | |
5498 | intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), | |
5499 | dev_priv->rps.min_freq); | |
5500 | ||
5501 | WARN_ONCE((dev_priv->rps.max_freq | | |
5502 | dev_priv->rps.efficient_freq | | |
5503 | dev_priv->rps.rp1_freq | | |
5504 | dev_priv->rps.min_freq) & 1, | |
5505 | "Odd GPU freq values\n"); | |
5506 | ||
5507 | dev_priv->rps.idle_freq = dev_priv->rps.min_freq; | |
5508 | ||
5509 | /* Preserve min/max settings in case of re-init */ | |
5510 | if (dev_priv->rps.max_freq_softlimit == 0) | |
5511 | dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; | |
5512 | ||
5513 | if (dev_priv->rps.min_freq_softlimit == 0) | |
5514 | dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; | |
5515 | ||
5516 | mutex_unlock(&dev_priv->rps.hw_lock); | |
5517 | } | |
5518 | ||
5519 | static void valleyview_cleanup_gt_powersave(struct drm_device *dev) | |
5520 | { | |
5521 | valleyview_cleanup_pctx(dev); | |
5522 | } | |
5523 | ||
5524 | static void cherryview_enable_rps(struct drm_device *dev) | |
5525 | { | |
5526 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5527 | struct intel_engine_cs *ring; | |
5528 | u32 gtfifodbg, val, rc6_mode = 0, pcbr; | |
5529 | int i; | |
5530 | ||
5531 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
5532 | ||
5533 | gtfifodbg = I915_READ(GTFIFODBG); | |
5534 | if (gtfifodbg) { | |
5535 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", | |
5536 | gtfifodbg); | |
5537 | I915_WRITE(GTFIFODBG, gtfifodbg); | |
5538 | } | |
5539 | ||
5540 | cherryview_check_pctx(dev_priv); | |
5541 | ||
5542 | /* 1a & 1b: Get forcewake during program sequence. Although the driver | |
5543 | * hasn't enabled a state yet where we need forcewake, BIOS may have. */ | |
5544 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
5545 | ||
5546 | /* Disable RC states. */ | |
5547 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
5548 | ||
5549 | /* 2a: Program RC6 thresholds.*/ | |
5550 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); | |
5551 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ | |
5552 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ | |
5553 | ||
5554 | for_each_ring(ring, dev_priv, i) | |
5555 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | |
5556 | I915_WRITE(GEN6_RC_SLEEP, 0); | |
5557 | ||
5558 | /* TO threshold set to 500 us (0x186 * 1.28 us) */ | |
5559 | I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); | |
5560 | ||
5561 | /* allows RC6 residency counter to work */ | |
5562 | I915_WRITE(VLV_COUNTER_CONTROL, | |
5563 | _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | | |
5564 | VLV_MEDIA_RC6_COUNT_EN | | |
5565 | VLV_RENDER_RC6_COUNT_EN)); | |
5566 | ||
5567 | /* For now we assume BIOS is allocating and populating the PCBR */ | |
5568 | pcbr = I915_READ(VLV_PCBR); | |
5569 | ||
5570 | /* 3: Enable RC6 */ | |
5571 | if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && | |
5572 | (pcbr >> VLV_PCBR_ADDR_SHIFT)) | |
5573 | rc6_mode = GEN7_RC_CTL_TO_MODE; | |
5574 | ||
5575 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); | |
5576 | ||
5577 | /* 4 Program defaults and thresholds for RPS*/ | |
5578 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | |
5579 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | |
5580 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | |
5581 | I915_WRITE(GEN6_RP_UP_EI, 66000); | |
5582 | I915_WRITE(GEN6_RP_DOWN_EI, 350000); | |
5583 | ||
5584 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | |
5585 | ||
5586 | /* 5: Enable RPS */ | |
5587 | I915_WRITE(GEN6_RP_CONTROL, | |
5588 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | |
5589 | GEN6_RP_MEDIA_IS_GFX | | |
5590 | GEN6_RP_ENABLE | | |
5591 | GEN6_RP_UP_BUSY_AVG | | |
5592 | GEN6_RP_DOWN_IDLE_AVG); | |
5593 | ||
5594 | /* Setting Fixed Bias */ | |
5595 | val = VLV_OVERRIDE_EN | | |
5596 | VLV_SOC_TDP_EN | | |
5597 | CHV_BIAS_CPU_50_SOC_50; | |
5598 | vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); | |
5599 | ||
5600 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | |
5601 | ||
5602 | /* RPS code assumes GPLL is used */ | |
5603 | WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); | |
5604 | ||
5605 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); | |
5606 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | |
5607 | ||
5608 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; | |
5609 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", | |
5610 | intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), | |
5611 | dev_priv->rps.cur_freq); | |
5612 | ||
5613 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", | |
5614 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), | |
5615 | dev_priv->rps.efficient_freq); | |
5616 | ||
5617 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); | |
5618 | ||
5619 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
5620 | } | |
5621 | ||
5622 | static void valleyview_enable_rps(struct drm_device *dev) | |
5623 | { | |
5624 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5625 | struct intel_engine_cs *ring; | |
5626 | u32 gtfifodbg, val, rc6_mode = 0; | |
5627 | int i; | |
5628 | ||
5629 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
5630 | ||
5631 | valleyview_check_pctx(dev_priv); | |
5632 | ||
5633 | if ((gtfifodbg = I915_READ(GTFIFODBG))) { | |
5634 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", | |
5635 | gtfifodbg); | |
5636 | I915_WRITE(GTFIFODBG, gtfifodbg); | |
5637 | } | |
5638 | ||
5639 | /* If VLV, Forcewake all wells, else re-direct to regular path */ | |
5640 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
5641 | ||
5642 | /* Disable RC states. */ | |
5643 | I915_WRITE(GEN6_RC_CONTROL, 0); | |
5644 | ||
5645 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | |
5646 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | |
5647 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | |
5648 | I915_WRITE(GEN6_RP_UP_EI, 66000); | |
5649 | I915_WRITE(GEN6_RP_DOWN_EI, 350000); | |
5650 | ||
5651 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | |
5652 | ||
5653 | I915_WRITE(GEN6_RP_CONTROL, | |
5654 | GEN6_RP_MEDIA_TURBO | | |
5655 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | |
5656 | GEN6_RP_MEDIA_IS_GFX | | |
5657 | GEN6_RP_ENABLE | | |
5658 | GEN6_RP_UP_BUSY_AVG | | |
5659 | GEN6_RP_DOWN_IDLE_CONT); | |
5660 | ||
5661 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); | |
5662 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | |
5663 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | |
5664 | ||
5665 | for_each_ring(ring, dev_priv, i) | |
5666 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | |
5667 | ||
5668 | I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); | |
5669 | ||
5670 | /* allows RC6 residency counter to work */ | |
5671 | I915_WRITE(VLV_COUNTER_CONTROL, | |
5672 | _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | | |
5673 | VLV_RENDER_RC0_COUNT_EN | | |
5674 | VLV_MEDIA_RC6_COUNT_EN | | |
5675 | VLV_RENDER_RC6_COUNT_EN)); | |
5676 | ||
5677 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) | |
5678 | rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; | |
5679 | ||
5680 | intel_print_rc6_info(dev, rc6_mode); | |
5681 | ||
5682 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); | |
5683 | ||
5684 | /* Setting Fixed Bias */ | |
5685 | val = VLV_OVERRIDE_EN | | |
5686 | VLV_SOC_TDP_EN | | |
5687 | VLV_BIAS_CPU_125_SOC_875; | |
5688 | vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); | |
5689 | ||
5690 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | |
5691 | ||
5692 | /* RPS code assumes GPLL is used */ | |
5693 | WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); | |
5694 | ||
5695 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); | |
5696 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | |
5697 | ||
5698 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; | |
5699 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", | |
5700 | intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), | |
5701 | dev_priv->rps.cur_freq); | |
5702 | ||
5703 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", | |
5704 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), | |
5705 | dev_priv->rps.efficient_freq); | |
5706 | ||
5707 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); | |
5708 | ||
5709 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
5710 | } | |
5711 | ||
5712 | static unsigned long intel_pxfreq(u32 vidfreq) | |
5713 | { | |
5714 | unsigned long freq; | |
5715 | int div = (vidfreq & 0x3f0000) >> 16; | |
5716 | int post = (vidfreq & 0x3000) >> 12; | |
5717 | int pre = (vidfreq & 0x7); | |
5718 | ||
5719 | if (!pre) | |
5720 | return 0; | |
5721 | ||
5722 | freq = ((div * 133333) / ((1<<post) * pre)); | |
5723 | ||
5724 | return freq; | |
5725 | } | |
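/*
 * Worked example for the decode above, using a made-up fuse value: with
 * div = 16, post = 1 and pre = 2 the helper returns
 * 16 * 133333 / ((1 << 1) * 2) = 533332.
 */
static inline unsigned long example_pxfreq_decode(void)
{
	u32 vidfreq = (16 << 16) | (1 << 12) | 2;	/* hypothetical value */

	return intel_pxfreq(vidfreq);
}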
5726 | ||
5727 | static const struct cparams { | |
5728 | u16 i; | |
5729 | u16 t; | |
5730 | u16 m; | |
5731 | u16 c; | |
5732 | } cparams[] = { | |
5733 | { 1, 1333, 301, 28664 }, | |
5734 | { 1, 1066, 294, 24460 }, | |
5735 | { 1, 800, 294, 25192 }, | |
5736 | { 0, 1333, 276, 27605 }, | |
5737 | { 0, 1066, 276, 27605 }, | |
5738 | { 0, 800, 231, 23784 }, | |
5739 | }; | |
5740 | ||
5741 | static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) | |
5742 | { | |
5743 | u64 total_count, diff, ret; | |
5744 | u32 count1, count2, count3, m = 0, c = 0; | |
5745 | unsigned long now = jiffies_to_msecs(jiffies), diff1; | |
5746 | int i; | |
5747 | ||
5748 | assert_spin_locked(&mchdev_lock); | |
5749 | ||
5750 | diff1 = now - dev_priv->ips.last_time1; | |
5751 | ||
5752 | /* Prevent division-by-zero if we are asking too fast. | |
5753 | * Also, we don't get interesting results if we are polling | |
5754 | * faster than once in 10ms, so just return the saved value | |
5755 | * in such cases. | |
5756 | */ | |
5757 | if (diff1 <= 10) | |
5758 | return dev_priv->ips.chipset_power; | |
5759 | ||
5760 | count1 = I915_READ(DMIEC); | |
5761 | count2 = I915_READ(DDREC); | |
5762 | count3 = I915_READ(CSIEC); | |
5763 | ||
5764 | total_count = count1 + count2 + count3; | |
5765 | ||
5766 | /* FIXME: handle per-counter overflow */ | |
5767 | if (total_count < dev_priv->ips.last_count1) { | |
5768 | diff = ~0UL - dev_priv->ips.last_count1; | |
5769 | diff += total_count; | |
5770 | } else { | |
5771 | diff = total_count - dev_priv->ips.last_count1; | |
5772 | } | |
5773 | ||
5774 | for (i = 0; i < ARRAY_SIZE(cparams); i++) { | |
5775 | if (cparams[i].i == dev_priv->ips.c_m && | |
5776 | cparams[i].t == dev_priv->ips.r_t) { | |
5777 | m = cparams[i].m; | |
5778 | c = cparams[i].c; | |
5779 | break; | |
5780 | } | |
5781 | } | |
5782 | ||
5783 | diff = div_u64(diff, diff1); | |
5784 | ret = ((m * diff) + c); | |
5785 | ret = div_u64(ret, 10); | |
5786 | ||
5787 | dev_priv->ips.last_count1 = total_count; | |
5788 | dev_priv->ips.last_time1 = now; | |
5789 | ||
5790 | dev_priv->ips.chipset_power = ret; | |
5791 | ||
5792 | return ret; | |
5793 | } | |
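/*
 * Sketch of the wrap handling above, as a hypothetical helper: when the
 * summed energy counters read back lower than the previous sample, a
 * single wrap is assumed and the distance to the wrap point is added,
 * mirroring the pattern in __i915_chipset_val().
 */
static inline u64 example_counter_delta(u64 now, u64 last)
{
	if (now < last)
		return (~0UL - last) + now;

	return now - last;
}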
5794 | ||
5795 | unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | |
5796 | { | |
5797 | struct drm_device *dev = dev_priv->dev; | |
5798 | unsigned long val; | |
5799 | ||
5800 | if (INTEL_INFO(dev)->gen != 5) | |
5801 | return 0; | |
5802 | ||
5803 | spin_lock_irq(&mchdev_lock); | |
5804 | ||
5805 | val = __i915_chipset_val(dev_priv); | |
5806 | ||
5807 | spin_unlock_irq(&mchdev_lock); | |
5808 | ||
5809 | return val; | |
5810 | } | |
5811 | ||
5812 | unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | |
5813 | { | |
5814 | unsigned long m, x, b; | |
5815 | u32 tsfs; | |
5816 | ||
5817 | tsfs = I915_READ(TSFS); | |
5818 | ||
5819 | m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); | |
5820 | x = I915_READ8(TR1); | |
5821 | ||
5822 | b = tsfs & TSFS_INTR_MASK; | |
5823 | ||
5824 | return ((m * x) / 127) - b; | |
5825 | } | |
5826 | ||
5827 | static int _pxvid_to_vd(u8 pxvid) | |
5828 | { | |
5829 | if (pxvid == 0) | |
5830 | return 0; | |
5831 | ||
5832 | if (pxvid >= 8 && pxvid < 31) | |
5833 | pxvid = 31; | |
5834 | ||
5835 | return (pxvid + 2) * 125; | |
5836 | } | |
5837 | ||
5838 | static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | |
5839 | { | |
5840 | struct drm_device *dev = dev_priv->dev; | |
5841 | const int vd = _pxvid_to_vd(pxvid); | |
5842 | const int vm = vd - 1125; | |
5843 | ||
5844 | if (INTEL_INFO(dev)->is_mobile) | |
5845 | return vm > 0 ? vm : 0; | |
5846 | ||
5847 | return vd; | |
5848 | } | |
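/*
 * Worked example for the two helpers above, with a hypothetical PXVID:
 * 0x20 (32) maps to vd = (32 + 2) * 125 = 4250; on mobile parts the
 * extended VID is vd - 1125 = 3125, clamped at zero when negative.
 */
static inline int example_extvid_mobile(u8 pxvid)
{
	int vd = _pxvid_to_vd(pxvid);
	int vm = vd - 1125;

	return vm > 0 ? vm : 0;
}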
5849 | ||
5850 | static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) | |
5851 | { | |
5852 | u64 now, diff, diffms; | |
5853 | u32 count; | |
5854 | ||
5855 | assert_spin_locked(&mchdev_lock); | |
5856 | ||
5857 | now = ktime_get_raw_ns(); | |
5858 | diffms = now - dev_priv->ips.last_time2; | |
5859 | do_div(diffms, NSEC_PER_MSEC); | |
5860 | ||
5861 | /* Don't divide by 0 */ | |
5862 | if (!diffms) | |
5863 | return; | |
5864 | ||
5865 | count = I915_READ(GFXEC); | |
5866 | ||
5867 | if (count < dev_priv->ips.last_count2) { | |
5868 | diff = ~0UL - dev_priv->ips.last_count2; | |
5869 | diff += count; | |
5870 | } else { | |
5871 | diff = count - dev_priv->ips.last_count2; | |
5872 | } | |
5873 | ||
5874 | dev_priv->ips.last_count2 = count; | |
5875 | dev_priv->ips.last_time2 = now; | |
5876 | ||
5877 | /* More magic constants... */ | |
5878 | diff = diff * 1181; | |
5879 | diff = div_u64(diff, diffms * 10); | |
5880 | dev_priv->ips.gfx_power = diff; | |
5881 | } | |
5882 | ||
5883 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | |
5884 | { | |
5885 | struct drm_device *dev = dev_priv->dev; | |
5886 | ||
5887 | if (INTEL_INFO(dev)->gen != 5) | |
5888 | return; | |
5889 | ||
5890 | spin_lock_irq(&mchdev_lock); | |
5891 | ||
5892 | __i915_update_gfx_val(dev_priv); | |
5893 | ||
5894 | spin_unlock_irq(&mchdev_lock); | |
5895 | } | |
5896 | ||
5897 | static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) | |
5898 | { | |
5899 | unsigned long t, corr, state1, corr2, state2; | |
5900 | u32 pxvid, ext_v; | |
5901 | ||
5902 | assert_spin_locked(&mchdev_lock); | |
5903 | ||
5904 | pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq)); | |
5905 | pxvid = (pxvid >> 24) & 0x7f; | |
5906 | ext_v = pvid_to_extvid(dev_priv, pxvid); | |
5907 | ||
5908 | state1 = ext_v; | |
5909 | ||
5910 | t = i915_mch_val(dev_priv); | |
5911 | ||
5912 | /* Revel in the empirically derived constants */ | |
5913 | ||
5914 | /* Correction factor in 1/100000 units */ | |
5915 | if (t > 80) | |
5916 | corr = ((t * 2349) + 135940); | |
5917 | else if (t >= 50) | |
5918 | corr = ((t * 964) + 29317); | |
5919 | else /* < 50 */ | |
5920 | corr = ((t * 301) + 1004); | |
5921 | ||
5922 | corr = corr * ((150142 * state1) / 10000 - 78642); | |
5923 | corr /= 100000; | |
5924 | corr2 = (corr * dev_priv->ips.corr); | |
5925 | ||
5926 | state2 = (corr2 * state1) / 10000; | |
5927 | state2 /= 100; /* convert to mW */ | |
5928 | ||
5929 | __i915_update_gfx_val(dev_priv); | |
5930 | ||
5931 | return dev_priv->ips.gfx_power + state2; | |
5932 | } | |
5933 | ||
5934 | unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) | |
5935 | { | |
5936 | struct drm_device *dev = dev_priv->dev; | |
5937 | unsigned long val; | |
5938 | ||
5939 | if (INTEL_INFO(dev)->gen != 5) | |
5940 | return 0; | |
5941 | ||
5942 | spin_lock_irq(&mchdev_lock); | |
5943 | ||
5944 | val = __i915_gfx_val(dev_priv); | |
5945 | ||
5946 | spin_unlock_irq(&mchdev_lock); | |
5947 | ||
5948 | return val; | |
5949 | } | |
5950 | ||
5951 | /** | |
5952 | * i915_read_mch_val - return value for IPS use | |
5953 | * | |
5954 | * Calculate and return a value for the IPS driver to use when deciding whether | |
5955 | * we have thermal and power headroom to increase CPU or GPU power budget. | |
5956 | */ | |
5957 | unsigned long i915_read_mch_val(void) | |
5958 | { | |
5959 | struct drm_i915_private *dev_priv; | |
5960 | unsigned long chipset_val, graphics_val, ret = 0; | |
5961 | ||
5962 | spin_lock_irq(&mchdev_lock); | |
5963 | if (!i915_mch_dev) | |
5964 | goto out_unlock; | |
5965 | dev_priv = i915_mch_dev; | |
5966 | ||
5967 | chipset_val = __i915_chipset_val(dev_priv); | |
5968 | graphics_val = __i915_gfx_val(dev_priv); | |
5969 | ||
5970 | ret = chipset_val + graphics_val; | |
5971 | ||
5972 | out_unlock: | |
5973 | spin_unlock_irq(&mchdev_lock); | |
5974 | ||
5975 | return ret; | |
5976 | } | |
5977 | EXPORT_SYMBOL_GPL(i915_read_mch_val); | |
5978 | ||
5979 | /** | |
5980 | * i915_gpu_raise - raise GPU frequency limit | |
5981 | * | |
5982 | * Raise the limit; IPS indicates we have thermal headroom. | |
5983 | */ | |
5984 | bool i915_gpu_raise(void) | |
5985 | { | |
5986 | struct drm_i915_private *dev_priv; | |
5987 | bool ret = true; | |
5988 | ||
5989 | spin_lock_irq(&mchdev_lock); | |
5990 | if (!i915_mch_dev) { | |
5991 | ret = false; | |
5992 | goto out_unlock; | |
5993 | } | |
5994 | dev_priv = i915_mch_dev; | |
5995 | ||
5996 | if (dev_priv->ips.max_delay > dev_priv->ips.fmax) | |
5997 | dev_priv->ips.max_delay--; | |
5998 | ||
5999 | out_unlock: | |
6000 | spin_unlock_irq(&mchdev_lock); | |
6001 | ||
6002 | return ret; | |
6003 | } | |
6004 | EXPORT_SYMBOL_GPL(i915_gpu_raise); | |
6005 | ||
6006 | /** | |
6007 | * i915_gpu_lower - lower GPU frequency limit | |
6008 | * | |
6009 | * IPS indicates we're close to a thermal limit, so throttle back the GPU | |
6010 | * frequency maximum. | |
6011 | */ | |
6012 | bool i915_gpu_lower(void) | |
6013 | { | |
6014 | struct drm_i915_private *dev_priv; | |
6015 | bool ret = true; | |
6016 | ||
6017 | spin_lock_irq(&mchdev_lock); | |
6018 | if (!i915_mch_dev) { | |
6019 | ret = false; | |
6020 | goto out_unlock; | |
6021 | } | |
6022 | dev_priv = i915_mch_dev; | |
6023 | ||
6024 | if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) | |
6025 | dev_priv->ips.max_delay++; | |
6026 | ||
6027 | out_unlock: | |
6028 | spin_unlock_irq(&mchdev_lock); | |
6029 | ||
6030 | return ret; | |
6031 | } | |
6032 | EXPORT_SYMBOL_GPL(i915_gpu_lower); | |
6033 | ||
6034 | /** | |
6035 | * i915_gpu_busy - indicate GPU business to IPS | |
6036 | * | |
6037 | * Tell the IPS driver whether or not the GPU is busy. | |
6038 | */ | |
6039 | bool i915_gpu_busy(void) | |
6040 | { | |
6041 | struct drm_i915_private *dev_priv; | |
6042 | struct intel_engine_cs *ring; | |
6043 | bool ret = false; | |
6044 | int i; | |
6045 | ||
6046 | spin_lock_irq(&mchdev_lock); | |
6047 | if (!i915_mch_dev) | |
6048 | goto out_unlock; | |
6049 | dev_priv = i915_mch_dev; | |
6050 | ||
6051 | for_each_ring(ring, dev_priv, i) | |
6052 | ret |= !list_empty(&ring->request_list); | |
6053 | ||
6054 | out_unlock: | |
6055 | spin_unlock_irq(&mchdev_lock); | |
6056 | ||
6057 | return ret; | |
6058 | } | |
6059 | EXPORT_SYMBOL_GPL(i915_gpu_busy); | |
6060 | ||
6061 | /** | |
6062 | * i915_gpu_turbo_disable - disable graphics turbo | |
6063 | * | |
6064 | * Disable graphics turbo by resetting the max frequency and setting the | |
6065 | * current frequency to the default. | |
6066 | */ | |
6067 | bool i915_gpu_turbo_disable(void) | |
6068 | { | |
6069 | struct drm_i915_private *dev_priv; | |
6070 | bool ret = true; | |
6071 | ||
6072 | spin_lock_irq(&mchdev_lock); | |
6073 | if (!i915_mch_dev) { | |
6074 | ret = false; | |
6075 | goto out_unlock; | |
6076 | } | |
6077 | dev_priv = i915_mch_dev; | |
6078 | ||
6079 | dev_priv->ips.max_delay = dev_priv->ips.fstart; | |
6080 | ||
6081 | if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) | |
6082 | ret = false; | |
6083 | ||
6084 | out_unlock: | |
6085 | spin_unlock_irq(&mchdev_lock); | |
6086 | ||
6087 | return ret; | |
6088 | } | |
6089 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | |
6090 | ||
6091 | /** | |
6092 | * Tells the intel_ips driver that the i915 driver is now loaded, if | |
6093 | * IPS got loaded first. | |
6094 | * | |
6095 | * This awkward dance is so that neither module has to depend on the | |
6096 | * other in order for IPS to do the appropriate communication of | |
6097 | * GPU turbo limits to i915. | |
6098 | */ | |
6099 | static void | |
6100 | ips_ping_for_i915_load(void) | |
6101 | { | |
6102 | void (*link)(void); | |
6103 | ||
6104 | link = symbol_get(ips_link_to_i915_driver); | |
6105 | if (link) { | |
6106 | link(); | |
6107 | symbol_put(ips_link_to_i915_driver); | |
6108 | } | |
6109 | } | |
6110 | ||
6111 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv) | |
6112 | { | |
6113 | /* We only register the i915 ips part with intel-ips once everything is | |
6114 | * set up, to avoid intel-ips sneaking in and reading bogus values. */ | |
6115 | spin_lock_irq(&mchdev_lock); | |
6116 | i915_mch_dev = dev_priv; | |
6117 | spin_unlock_irq(&mchdev_lock); | |
6118 | ||
6119 | ips_ping_for_i915_load(); | |
6120 | } | |
6121 | ||
6122 | void intel_gpu_ips_teardown(void) | |
6123 | { | |
6124 | spin_lock_irq(&mchdev_lock); | |
6125 | i915_mch_dev = NULL; | |
6126 | spin_unlock_irq(&mchdev_lock); | |
6127 | } | |
6128 | ||
6129 | static void intel_init_emon(struct drm_device *dev) | |
6130 | { | |
6131 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6132 | u32 lcfuse; | |
6133 | u8 pxw[16]; | |
6134 | int i; | |
6135 | ||
6136 | /* Disable PMON while we program the event weights below */ | |
6137 | I915_WRITE(ECR, 0); | |
6138 | POSTING_READ(ECR); | |
6139 | ||
6140 | /* Program energy weights for various events */ | |
6141 | I915_WRITE(SDEW, 0x15040d00); | |
6142 | I915_WRITE(CSIEW0, 0x007f0000); | |
6143 | I915_WRITE(CSIEW1, 0x1e220004); | |
6144 | I915_WRITE(CSIEW2, 0x04000004); | |
6145 | ||
6146 | for (i = 0; i < 5; i++) | |
6147 | I915_WRITE(PEW(i), 0); | |
6148 | for (i = 0; i < 3; i++) | |
6149 | I915_WRITE(DEW(i), 0); | |
6150 | ||
6151 | /* Program P-state weights to account for frequency power adjustment */ | |
6152 | for (i = 0; i < 16; i++) { | |
6153 | u32 pxvidfreq = I915_READ(PXVFREQ(i)); | |
6154 | unsigned long freq = intel_pxfreq(pxvidfreq); | |
6155 | unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> | |
6156 | PXVFREQ_PX_SHIFT; | |
6157 | unsigned long val; | |
6158 | ||
6159 | val = vid * vid; | |
6160 | val *= (freq / 1000); | |
6161 | val *= 255; | |
6162 | val /= (127*127*900); | |
6163 | if (val > 0xff) | |
6164 | DRM_ERROR("bad pxval: %ld\n", val); | |
6165 | pxw[i] = val; | |
6166 | } | |
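 | ||
 | /* | |
 | * Worked example (illustrative numbers only, not real fuse values): | |
 | * for a PX vid of 100 at a PX frequency of 400 MHz, the loop above | |
 | * computes | |
 | * | |
 | *     val = 100 * 100 * 400 * 255 / (127 * 127 * 900) | |
 | *         = 1,020,000,000 / 14,516,100 | |
 | *         ~= 70 | |
 | * | |
 | * i.e. each pxw[] entry is an 8-bit energy weight, scaled so that a | |
 | * 127-vid part running at 900 MHz would map to 255. | |
 | */ | |
 | ||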
6167 | /* Render standby states get 0 weight */ | |
6168 | pxw[14] = 0; | |
6169 | pxw[15] = 0; | |
6170 | ||
6171 | for (i = 0; i < 4; i++) { | |
6172 | u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | | |
6173 | (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); | |
6174 | I915_WRITE(PXW(i), val); | |
6175 | } | |
6176 | ||
6177 | /* Adjust magic regs to magic values (more experimental results) */ | |
6178 | I915_WRITE(OGW0, 0); | |
6179 | I915_WRITE(OGW1, 0); | |
6180 | I915_WRITE(EG0, 0x00007f00); | |
6181 | I915_WRITE(EG1, 0x0000000e); | |
6182 | I915_WRITE(EG2, 0x000e0000); | |
6183 | I915_WRITE(EG3, 0x68000300); | |
6184 | I915_WRITE(EG4, 0x42000000); | |
6185 | I915_WRITE(EG5, 0x00140031); | |
6186 | I915_WRITE(EG6, 0); | |
6187 | I915_WRITE(EG7, 0); | |
6188 | ||
6189 | for (i = 0; i < 8; i++) | |
6190 | I915_WRITE(PXWL(i), 0); | |
6191 | ||
6192 | /* Enable PMON + select events */ | |
6193 | I915_WRITE(ECR, 0x80000019); | |
6194 | ||
6195 | lcfuse = I915_READ(LCFUSE02); | |
6196 | ||
6197 | dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); | |
6198 | } | |
6199 | ||
6200 | void intel_init_gt_powersave(struct drm_device *dev) | |
6201 | { | |
6202 | i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); | |
6203 | ||
6204 | if (IS_CHERRYVIEW(dev)) | |
6205 | cherryview_init_gt_powersave(dev); | |
6206 | else if (IS_VALLEYVIEW(dev)) | |
6207 | valleyview_init_gt_powersave(dev); | |
6208 | } | |
6209 | ||
6210 | void intel_cleanup_gt_powersave(struct drm_device *dev) | |
6211 | { | |
6212 | if (IS_CHERRYVIEW(dev)) | |
6213 | return; | |
6214 | else if (IS_VALLEYVIEW(dev)) | |
6215 | valleyview_cleanup_gt_powersave(dev); | |
6216 | } | |
6217 | ||
6218 | static void gen6_suspend_rps(struct drm_device *dev) | |
6219 | { | |
6220 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6221 | ||
6222 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | |
6223 | ||
6224 | gen6_disable_rps_interrupts(dev); | |
6225 | } | |
6226 | ||
6227 | /** | |
6228 | * intel_suspend_gt_powersave - suspend PM work and helper threads | |
6229 | * @dev: drm device | |
6230 | * | |
6231 | * We don't want to disable RC6 or other features here; we just want | |
6232 | * to make sure any work we've queued has finished and won't bother | |
6233 | * us while we're suspended. | |
6234 | */ | |
6235 | void intel_suspend_gt_powersave(struct drm_device *dev) | |
6236 | { | |
6237 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6238 | ||
6239 | if (INTEL_INFO(dev)->gen < 6) | |
6240 | return; | |
6241 | ||
6242 | gen6_suspend_rps(dev); | |
6243 | ||
6244 | /* Force GPU to min freq during suspend */ | |
6245 | gen6_rps_idle(dev_priv); | |
6246 | } | |
6247 | ||
6248 | void intel_disable_gt_powersave(struct drm_device *dev) | |
6249 | { | |
6250 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6251 | ||
6252 | if (IS_IRONLAKE_M(dev)) { | |
6253 | ironlake_disable_drps(dev); | |
6254 | } else if (INTEL_INFO(dev)->gen >= 6) { | |
6255 | intel_suspend_gt_powersave(dev); | |
6256 | ||
6257 | mutex_lock(&dev_priv->rps.hw_lock); | |
6258 | if (INTEL_INFO(dev)->gen >= 9) | |
6259 | gen9_disable_rps(dev); | |
6260 | else if (IS_CHERRYVIEW(dev)) | |
6261 | cherryview_disable_rps(dev); | |
6262 | else if (IS_VALLEYVIEW(dev)) | |
6263 | valleyview_disable_rps(dev); | |
6264 | else | |
6265 | gen6_disable_rps(dev); | |
6266 | ||
6267 | dev_priv->rps.enabled = false; | |
6268 | mutex_unlock(&dev_priv->rps.hw_lock); | |
6269 | } | |
6270 | } | |
6271 | ||
6272 | static void intel_gen6_powersave_work(struct work_struct *work) | |
6273 | { | |
6274 | struct drm_i915_private *dev_priv = | |
6275 | container_of(work, struct drm_i915_private, | |
6276 | rps.delayed_resume_work.work); | |
6277 | struct drm_device *dev = dev_priv->dev; | |
6278 | ||
6279 | mutex_lock(&dev_priv->rps.hw_lock); | |
6280 | ||
6281 | gen6_reset_rps_interrupts(dev); | |
6282 | ||
6283 | if (IS_CHERRYVIEW(dev)) { | |
6284 | cherryview_enable_rps(dev); | |
6285 | } else if (IS_VALLEYVIEW(dev)) { | |
6286 | valleyview_enable_rps(dev); | |
6287 | } else if (INTEL_INFO(dev)->gen >= 9) { | |
6288 | gen9_enable_rc6(dev); | |
6289 | gen9_enable_rps(dev); | |
6290 | if (IS_SKYLAKE(dev)) | |
6291 | __gen6_update_ring_freq(dev); | |
6292 | } else if (IS_BROADWELL(dev)) { | |
6293 | gen8_enable_rps(dev); | |
6294 | __gen6_update_ring_freq(dev); | |
6295 | } else { | |
6296 | gen6_enable_rps(dev); | |
6297 | __gen6_update_ring_freq(dev); | |
6298 | } | |
6299 | ||
6300 | WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); | |
6301 | WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); | |
6302 | ||
6303 | WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); | |
6304 | WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); | |
6305 | ||
6306 | dev_priv->rps.enabled = true; | |
6307 | ||
6308 | gen6_enable_rps_interrupts(dev); | |
6309 | ||
6310 | mutex_unlock(&dev_priv->rps.hw_lock); | |
6311 | ||
6312 | intel_runtime_pm_put(dev_priv); | |
6313 | } | |
6314 | ||
6315 | void intel_enable_gt_powersave(struct drm_device *dev) | |
6316 | { | |
6317 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6318 | ||
6319 | /* Powersaving is controlled by the host when inside a VM */ | |
6320 | if (intel_vgpu_active(dev)) | |
6321 | return; | |
6322 | ||
6323 | if (IS_IRONLAKE_M(dev)) { | |
6324 | mutex_lock(&dev->struct_mutex); | |
6325 | ironlake_enable_drps(dev); | |
6326 | intel_init_emon(dev); | |
6327 | mutex_unlock(&dev->struct_mutex); | |
6328 | } else if (INTEL_INFO(dev)->gen >= 6) { | |
6329 | /* | |
6330 | * PCU communication is slow and this doesn't need to be | |
6331 | * done at any specific time, so do this out of our fast path | |
6332 | * to make resume and init faster. | |
6333 | * | |
6334 | * We depend on the HW RC6 power context save/restore | |
6335 | * mechanism when entering D3 through runtime PM suspend. So | |
6336 | * disable RPM until RPS/RC6 is properly setup. We can only | |
6337 | * get here via the driver load/system resume/runtime resume | |
6338 | * paths, so the _noresume version is enough (and in case of | |
6339 | * runtime resume it's necessary). | |
6340 | */ | |
6341 | if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work, | |
6342 | round_jiffies_up_relative(HZ))) | |
6343 | intel_runtime_pm_get_noresume(dev_priv); | |
6344 | } | |
6345 | } | |
6346 | ||
6347 | void intel_reset_gt_powersave(struct drm_device *dev) | |
6348 | { | |
6349 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6350 | ||
6351 | if (INTEL_INFO(dev)->gen < 6) | |
6352 | return; | |
6353 | ||
6354 | gen6_suspend_rps(dev); | |
6355 | dev_priv->rps.enabled = false; | |
6356 | } | |
6357 | ||
6358 | static void ibx_init_clock_gating(struct drm_device *dev) | |
6359 | { | |
6360 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6361 | ||
6362 | /* | |
6363 | * On Ibex Peak and Cougar Point, we need to disable clock | |
6364 | * gating for the panel power sequencer or it will fail to | |
6365 | * start up when no ports are active. | |
6366 | */ | |
6367 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | |
6368 | } | |
6369 | ||
6370 | static void g4x_disable_trickle_feed(struct drm_device *dev) | |
6371 | { | |
6372 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6373 | enum pipe pipe; | |
6374 | ||
6375 | for_each_pipe(dev_priv, pipe) { | |
6376 | I915_WRITE(DSPCNTR(pipe), | |
6377 | I915_READ(DSPCNTR(pipe)) | | |
6378 | DISPPLANE_TRICKLE_FEED_DISABLE); | |
6379 | ||
6380 | I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); | |
6381 | POSTING_READ(DSPSURF(pipe)); | |
6382 | } | |
6383 | } | |
6384 | ||
6385 | static void ilk_init_lp_watermarks(struct drm_device *dev) | |
6386 | { | |
6387 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6388 | ||
6389 | I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); | |
6390 | I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); | |
6391 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | |
6392 | ||
6393 | /* | |
6394 | * Don't touch WM1S_LP_EN here. | |
6395 | * Doing so could cause underruns. | |
6396 | */ | |
6397 | } | |
6398 | ||
6399 | static void ironlake_init_clock_gating(struct drm_device *dev) | |
6400 | { | |
6401 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6402 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; | |
6403 | ||
6404 | /* | |
6405 | * Required for FBC | |
6406 | * WaFbcDisableDpfcClockGating:ilk | |
6407 | */ | |
6408 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | | |
6409 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | | |
6410 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; | |
6411 | ||
6412 | I915_WRITE(PCH_3DCGDIS0, | |
6413 | MARIUNIT_CLOCK_GATE_DISABLE | | |
6414 | SVSMUNIT_CLOCK_GATE_DISABLE); | |
6415 | I915_WRITE(PCH_3DCGDIS1, | |
6416 | VFMUNIT_CLOCK_GATE_DISABLE); | |
6417 | ||
6418 | /* | |
6419 | * According to the spec the following bits should be set in | |
6420 | * order to enable memory self-refresh: | |
6421 | * The bit 22/21 of 0x42004 | |
6422 | * The bit 5 of 0x42020 | |
6423 | * The bit 15 of 0x45000 | |
6424 | */ | |
6425 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6426 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6427 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | |
6428 | dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; | |
6429 | I915_WRITE(DISP_ARB_CTL, | |
6430 | (I915_READ(DISP_ARB_CTL) | | |
6431 | DISP_FBC_WM_DIS)); | |
6432 | ||
6433 | ilk_init_lp_watermarks(dev); | |
6434 | ||
6435 | /* | |
6436 | * Based on documentation from the hardware team, the following bits | |
6437 | * should be set unconditionally in order to enable FBC: | |
6438 | * The bit 22 of 0x42000 | |
6439 | * The bit 22 of 0x42004 | |
6440 | * The bit 7,8,9 of 0x42020. | |
6441 | */ | |
6442 | if (IS_IRONLAKE_M(dev)) { | |
6443 | /* WaFbcAsynchFlipDisableFbcQueue:ilk */ | |
6444 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
6445 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
6446 | ILK_FBCQ_DIS); | |
6447 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6448 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6449 | ILK_DPARB_GATE); | |
6450 | } | |
6451 | ||
6452 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); | |
6453 | ||
6454 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6455 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6456 | ILK_ELPIN_409_SELECT); | |
6457 | I915_WRITE(_3D_CHICKEN2, | |
6458 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | |
6459 | _3D_CHICKEN2_WM_READ_PIPELINED); | |
6460 | ||
6461 | /* WaDisableRenderCachePipelinedFlush:ilk */ | |
6462 | I915_WRITE(CACHE_MODE_0, | |
6463 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | |
6464 | ||
6465 | /* WaDisable_RenderCache_OperationalFlush:ilk */ | |
6466 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6467 | ||
6468 | g4x_disable_trickle_feed(dev); | |
6469 | ||
6470 | ibx_init_clock_gating(dev); | |
6471 | } | |
6472 | ||
6473 | static void cpt_init_clock_gating(struct drm_device *dev) | |
6474 | { | |
6475 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6476 | int pipe; | |
6477 | uint32_t val; | |
6478 | ||
6479 | /* | |
6480 | * On Ibex Peak and Cougar Point, we need to disable clock | |
6481 | * gating for the panel power sequencer or it will fail to | |
6482 | * start up when no ports are active. | |
6483 | */ | |
6484 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | | |
6485 | PCH_DPLUNIT_CLOCK_GATE_DISABLE | | |
6486 | PCH_CPUNIT_CLOCK_GATE_DISABLE); | |
6487 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | |
6488 | DPLS_EDP_PPS_FIX_DIS); | |
6489 | /* The below fixes a weird display corruption (a few pixels shifted | |
6490 | * downward) seen only on the LVDS output of some HP laptops with IVB. | |
6491 | */ | |
6492 | for_each_pipe(dev_priv, pipe) { | |
6493 | val = I915_READ(TRANS_CHICKEN2(pipe)); | |
6494 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | |
6495 | val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | |
6496 | if (dev_priv->vbt.fdi_rx_polarity_inverted) | |
6497 | val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | |
6498 | val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; | |
6499 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; | |
6500 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; | |
6501 | I915_WRITE(TRANS_CHICKEN2(pipe), val); | |
6502 | } | |
6503 | /* WADP0ClockGatingDisable */ | |
6504 | for_each_pipe(dev_priv, pipe) { | |
6505 | I915_WRITE(TRANS_CHICKEN1(pipe), | |
6506 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | |
6507 | } | |
6508 | } | |
6509 | ||
6510 | static void gen6_check_mch_setup(struct drm_device *dev) | |
6511 | { | |
6512 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6513 | uint32_t tmp; | |
6514 | ||
6515 | tmp = I915_READ(MCH_SSKPD); | |
6516 | if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) | |
6517 | DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", | |
6518 | tmp); | |
6519 | } | |
6520 | ||
6521 | static void gen6_init_clock_gating(struct drm_device *dev) | |
6522 | { | |
6523 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6524 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; | |
6525 | ||
6526 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); | |
6527 | ||
6528 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6529 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6530 | ILK_ELPIN_409_SELECT); | |
6531 | ||
6532 | /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ | |
6533 | I915_WRITE(_3D_CHICKEN, | |
6534 | _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); | |
6535 | ||
6536 | /* WaDisable_RenderCache_OperationalFlush:snb */ | |
6537 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6538 | ||
6539 | /* | |
6540 | * BSpec recommends 8x4 when MSAA is used, | |
6541 | * however in practice 16x4 seems fastest. | |
6542 | * | |
6543 | * Note that PS/WM thread counts depend on the WIZ hashing | |
6544 | * disable bit, which we don't touch here, but it's good | |
6545 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
6546 | */ | |
6547 | I915_WRITE(GEN6_GT_MODE, | |
6548 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); | |
6549 | ||
6550 | ilk_init_lp_watermarks(dev); | |
6551 | ||
6552 | I915_WRITE(CACHE_MODE_0, | |
6553 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | |
6554 | ||
6555 | I915_WRITE(GEN6_UCGCTL1, | |
6556 | I915_READ(GEN6_UCGCTL1) | | |
6557 | GEN6_BLBUNIT_CLOCK_GATE_DISABLE | | |
6558 | GEN6_CSUNIT_CLOCK_GATE_DISABLE); | |
6559 | ||
6560 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock | |
6561 | * gating disable must be set. Failure to set it results in | |
6562 | * flickering pixels due to Z write ordering failures after | |
6563 | * some amount of runtime in the Mesa "fire" demo, and Unigine | |
6564 | * Sanctuary and Tropics, and apparently anything else with | |
6565 | * alpha test or pixel discard. | |
6566 | * | |
6567 | * According to the spec, bit 11 (RCCUNIT) must also be set, | |
6568 | * but we didn't debug actual testcases to find it out. | |
6569 | * | |
6570 | * WaDisableRCCUnitClockGating:snb | |
6571 | * WaDisableRCPBUnitClockGating:snb | |
6572 | */ | |
6573 | I915_WRITE(GEN6_UCGCTL2, | |
6574 | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | |
6575 | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | |
6576 | ||
6577 | /* WaStripsFansDisableFastClipPerformanceFix:snb */ | |
6578 | I915_WRITE(_3D_CHICKEN3, | |
6579 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); | |
6580 | ||
6581 | /* | |
6582 | * Bspec says: | |
6583 | * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and | |
6584 | * 3DSTATE_SF number of SF output attributes is more than 16." | |
6585 | */ | |
6586 | I915_WRITE(_3D_CHICKEN3, | |
6587 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); | |
6588 | ||
6589 | /* | |
6590 | * According to the spec the following bits should be | |
6591 | * set in order to enable memory self-refresh and fbc: | |
6592 | * The bit21 and bit22 of 0x42000 | |
6593 | * The bit21 and bit22 of 0x42004 | |
6594 | * The bit5 and bit7 of 0x42020 | |
6595 | * The bit14 of 0x70180 | |
6596 | * The bit14 of 0x71180 | |
6597 | * | |
6598 | * WaFbcAsynchFlipDisableFbcQueue:snb | |
6599 | */ | |
6600 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
6601 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
6602 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); | |
6603 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
6604 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
6605 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | |
6606 | I915_WRITE(ILK_DSPCLK_GATE_D, | |
6607 | I915_READ(ILK_DSPCLK_GATE_D) | | |
6608 | ILK_DPARBUNIT_CLOCK_GATE_ENABLE | | |
6609 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); | |
6610 | ||
6611 | g4x_disable_trickle_feed(dev); | |
6612 | ||
6613 | cpt_init_clock_gating(dev); | |
6614 | ||
6615 | gen6_check_mch_setup(dev); | |
6616 | } | |
6617 | ||
6618 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) | |
6619 | { | |
6620 | uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); | |
6621 | ||
6622 | /* | |
6623 | * WaVSThreadDispatchOverride:ivb,vlv | |
6624 | * | |
6625 | * This actually overrides the dispatch | |
6626 | * mode for all thread types. | |
6627 | */ | |
6628 | reg &= ~GEN7_FF_SCHED_MASK; | |
6629 | reg |= GEN7_FF_TS_SCHED_HW; | |
6630 | reg |= GEN7_FF_VS_SCHED_HW; | |
6631 | reg |= GEN7_FF_DS_SCHED_HW; | |
6632 | ||
6633 | I915_WRITE(GEN7_FF_THREAD_MODE, reg); | |
6634 | } | |
6635 | ||
6636 | static void lpt_init_clock_gating(struct drm_device *dev) | |
6637 | { | |
6638 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6639 | ||
6640 | /* | |
6641 | * TODO: this bit should only be enabled when really needed, then | |
6642 | * disabled when not needed anymore in order to save power. | |
6643 | */ | |
6644 | if (HAS_PCH_LPT_LP(dev)) | |
6645 | I915_WRITE(SOUTH_DSPCLK_GATE_D, | |
6646 | I915_READ(SOUTH_DSPCLK_GATE_D) | | |
6647 | PCH_LP_PARTITION_LEVEL_DISABLE); | |
6648 | ||
6649 | /* WADPOClockGatingDisable:hsw */ | |
6650 | I915_WRITE(TRANS_CHICKEN1(PIPE_A), | |
6651 | I915_READ(TRANS_CHICKEN1(PIPE_A)) | | |
6652 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | |
6653 | } | |
6654 | ||
6655 | static void lpt_suspend_hw(struct drm_device *dev) | |
6656 | { | |
6657 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6658 | ||
6659 | if (HAS_PCH_LPT_LP(dev)) { | |
6660 | uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); | |
6661 | ||
6662 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; | |
6663 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); | |
6664 | } | |
6665 | } | |
6666 | ||
6667 | static void broadwell_init_clock_gating(struct drm_device *dev) | |
6668 | { | |
6669 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6670 | enum pipe pipe; | |
6671 | uint32_t misccpctl; | |
6672 | ||
6673 | ilk_init_lp_watermarks(dev); | |
6674 | ||
6675 | /* WaSwitchSolVfFArbitrationPriority:bdw */ | |
6676 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); | |
6677 | ||
6678 | /* WaPsrDPAMaskVBlankInSRD:bdw */ | |
6679 | I915_WRITE(CHICKEN_PAR1_1, | |
6680 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); | |
6681 | ||
6682 | /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ | |
6683 | for_each_pipe(dev_priv, pipe) { | |
6684 | I915_WRITE(CHICKEN_PIPESL_1(pipe), | |
6685 | I915_READ(CHICKEN_PIPESL_1(pipe)) | | |
6686 | BDW_DPRS_MASK_VBLANK_SRD); | |
6687 | } | |
6688 | ||
6689 | /* WaVSRefCountFullforceMissDisable:bdw */ | |
6690 | /* WaDSRefCountFullforceMissDisable:bdw */ | |
6691 | I915_WRITE(GEN7_FF_THREAD_MODE, | |
6692 | I915_READ(GEN7_FF_THREAD_MODE) & | |
6693 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | |
6694 | ||
6695 | I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, | |
6696 | _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); | |
6697 | ||
6698 | /* WaDisableSDEUnitClockGating:bdw */ | |
6699 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
6700 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |
6701 | ||
6702 | /* | |
6703 | * WaProgramL3SqcReg1Default:bdw | |
6704 | * WaTempDisableDOPClkGating:bdw | |
6705 | */ | |
6706 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |
6707 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
6708 | I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); | |
6709 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |
6710 | ||
6711 | /* | |
6712 | * WaGttCachingOffByDefault:bdw | |
6713 | * GTT cache may not work with big pages, so if those | |
6714 | * are ever enabled GTT cache may need to be disabled. | |
6715 | */ | |
6716 | I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); | |
6717 | ||
6718 | lpt_init_clock_gating(dev); | |
6719 | } | |
6720 | ||
6721 | static void haswell_init_clock_gating(struct drm_device *dev) | |
6722 | { | |
6723 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6724 | ||
6725 | ilk_init_lp_watermarks(dev); | |
6726 | ||
6727 | /* L3 caching of data atomics doesn't work -- disable it. */ | |
6728 | I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); | |
6729 | I915_WRITE(HSW_ROW_CHICKEN3, | |
6730 | _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); | |
6731 | ||
6732 | /* This is required by WaCatErrorRejectionIssue:hsw */ | |
6733 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | |
6734 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |
6735 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |
6736 | ||
6737 | /* WaVSRefCountFullforceMissDisable:hsw */ | |
6738 | I915_WRITE(GEN7_FF_THREAD_MODE, | |
6739 | I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); | |
6740 | ||
6741 | /* WaDisable_RenderCache_OperationalFlush:hsw */ | |
6742 | I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6743 | ||
6744 | /* enable HiZ Raw Stall Optimization */ | |
6745 | I915_WRITE(CACHE_MODE_0_GEN7, | |
6746 | _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); | |
6747 | ||
6748 | /* WaDisable4x2SubspanOptimization:hsw */ | |
6749 | I915_WRITE(CACHE_MODE_1, | |
6750 | _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | |
6751 | ||
6752 | /* | |
6753 | * BSpec recommends 8x4 when MSAA is used, | |
6754 | * however in practice 16x4 seems fastest. | |
6755 | * | |
6756 | * Note that PS/WM thread counts depend on the WIZ hashing | |
6757 | * disable bit, which we don't touch here, but it's good | |
6758 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
6759 | */ | |
6760 | I915_WRITE(GEN7_GT_MODE, | |
6761 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); | |
6762 | ||
6763 | /* WaSampleCChickenBitEnable:hsw */ | |
6764 | I915_WRITE(HALF_SLICE_CHICKEN3, | |
6765 | _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); | |
6766 | ||
6767 | /* WaSwitchSolVfFArbitrationPriority:hsw */ | |
6768 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); | |
6769 | ||
6770 | /* WaRsPkgCStateDisplayPMReq:hsw */ | |
6771 | I915_WRITE(CHICKEN_PAR1_1, | |
6772 | I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); | |
6773 | ||
6774 | lpt_init_clock_gating(dev); | |
6775 | } | |
6776 | ||
6777 | static void ivybridge_init_clock_gating(struct drm_device *dev) | |
6778 | { | |
6779 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6780 | uint32_t snpcr; | |
6781 | ||
6782 | ilk_init_lp_watermarks(dev); | |
6783 | ||
6784 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); | |
6785 | ||
6786 | /* WaDisableEarlyCull:ivb */ | |
6787 | I915_WRITE(_3D_CHICKEN3, | |
6788 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | |
6789 | ||
6790 | /* WaDisableBackToBackFlipFix:ivb */ | |
6791 | I915_WRITE(IVB_CHICKEN3, | |
6792 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | |
6793 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | |
6794 | ||
6795 | /* WaDisablePSDDualDispatchEnable:ivb */ | |
6796 | if (IS_IVB_GT1(dev)) | |
6797 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | |
6798 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | |
6799 | ||
6800 | /* WaDisable_RenderCache_OperationalFlush:ivb */ | |
6801 | I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6802 | ||
6803 | /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ | |
6804 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | |
6805 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | |
6806 | ||
6807 | /* WaApplyL3ControlAndL3ChickenMode:ivb */ | |
6808 | I915_WRITE(GEN7_L3CNTLREG1, | |
6809 | GEN7_WA_FOR_GEN7_L3_CONTROL); | |
6810 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, | |
6811 | GEN7_WA_L3_CHICKEN_MODE); | |
6812 | if (IS_IVB_GT1(dev)) | |
6813 | I915_WRITE(GEN7_ROW_CHICKEN2, | |
6814 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
6815 | else { | |
6816 | /* must write both registers */ | |
6817 | I915_WRITE(GEN7_ROW_CHICKEN2, | |
6818 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
6819 | I915_WRITE(GEN7_ROW_CHICKEN2_GT2, | |
6820 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
6821 | } | |
6822 | ||
6823 | /* WaForceL3Serialization:ivb */ | |
6824 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | |
6825 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | |
6826 | ||
6827 | /* | |
6828 | * According to the spec, bit 13 (RCZUNIT) must be set on IVB. | |
6829 | * This implements the WaDisableRCZUnitClockGating:ivb workaround. | |
6830 | */ | |
6831 | I915_WRITE(GEN6_UCGCTL2, | |
6832 | GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | |
6833 | ||
6834 | /* This is required by WaCatErrorRejectionIssue:ivb */ | |
6835 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | |
6836 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |
6837 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |
6838 | ||
6839 | g4x_disable_trickle_feed(dev); | |
6840 | ||
6841 | gen7_setup_fixed_func_scheduler(dev_priv); | |
6842 | ||
6843 | if (0) { /* causes HiZ corruption on ivb:gt1 */ | |
6844 | /* enable HiZ Raw Stall Optimization */ | |
6845 | I915_WRITE(CACHE_MODE_0_GEN7, | |
6846 | _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); | |
6847 | } | |
6848 | ||
6849 | /* WaDisable4x2SubspanOptimization:ivb */ | |
6850 | I915_WRITE(CACHE_MODE_1, | |
6851 | _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | |
6852 | ||
6853 | /* | |
6854 | * BSpec recommends 8x4 when MSAA is used, | |
6855 | * however in practice 16x4 seems fastest. | |
6856 | * | |
6857 | * Note that PS/WM thread counts depend on the WIZ hashing | |
6858 | * disable bit, which we don't touch here, but it's good | |
6859 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
6860 | */ | |
6861 | I915_WRITE(GEN7_GT_MODE, | |
6862 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); | |
6863 | ||
6864 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | |
6865 | snpcr &= ~GEN6_MBC_SNPCR_MASK; | |
6866 | snpcr |= GEN6_MBC_SNPCR_MED; | |
6867 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | |
6868 | ||
6869 | if (!HAS_PCH_NOP(dev)) | |
6870 | cpt_init_clock_gating(dev); | |
6871 | ||
6872 | gen6_check_mch_setup(dev); | |
6873 | } | |
6874 | ||
6875 | static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) | |
6876 | { | |
6877 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | |
6878 | ||
6879 | /* | |
6880 | * Disable trickle feed and enable pnd deadline calculation | |
6881 | */ | |
6882 | I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); | |
6883 | I915_WRITE(CBR1_VLV, 0); | |
6884 | } | |
6885 | ||
6886 | static void valleyview_init_clock_gating(struct drm_device *dev) | |
6887 | { | |
6888 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6889 | ||
6890 | vlv_init_display_clock_gating(dev_priv); | |
6891 | ||
6892 | /* WaDisableEarlyCull:vlv */ | |
6893 | I915_WRITE(_3D_CHICKEN3, | |
6894 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | |
6895 | ||
6896 | /* WaDisableBackToBackFlipFix:vlv */ | |
6897 | I915_WRITE(IVB_CHICKEN3, | |
6898 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | |
6899 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | |
6900 | ||
6901 | /* WaPsdDispatchEnable:vlv */ | |
6902 | /* WaDisablePSDDualDispatchEnable:vlv */ | |
6903 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | |
6904 | _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | | |
6905 | GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | |
6906 | ||
6907 | /* WaDisable_RenderCache_OperationalFlush:vlv */ | |
6908 | I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
6909 | ||
6910 | /* WaForceL3Serialization:vlv */ | |
6911 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | |
6912 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | |
6913 | ||
6914 | /* WaDisableDopClockGating:vlv */ | |
6915 | I915_WRITE(GEN7_ROW_CHICKEN2, | |
6916 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |
6917 | ||
6918 | /* This is required by WaCatErrorRejectionIssue:vlv */ | |
6919 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | |
6920 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |
6921 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |
6922 | ||
6923 | gen7_setup_fixed_func_scheduler(dev_priv); | |
6924 | ||
6925 | /* | |
6926 | * According to the spec, bit 13 (RCZUNIT) must be set on IVB. | |
6927 | * This implements the WaDisableRCZUnitClockGating:vlv workaround. | |
6928 | */ | |
6929 | I915_WRITE(GEN6_UCGCTL2, | |
6930 | GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | |
6931 | ||
6932 | /* WaDisableL3Bank2xClockGate:vlv | |
6933 | * Disabling L3 clock gating - MMIO 940c[25] = 1 | |
6934 | * Set bit 25 to disable L3_BANK_2x_CLK_GATING */ | |
6935 | I915_WRITE(GEN7_UCGCTL4, | |
6936 | I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); | |
6937 | ||
6938 | /* | |
6939 | * BSpec says this must be set, even though | |
6940 | * WaDisable4x2SubspanOptimization isn't listed for VLV. | |
6941 | */ | |
6942 | I915_WRITE(CACHE_MODE_1, | |
6943 | _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); | |
6944 | ||
6945 | /* | |
6946 | * BSpec recommends 8x4 when MSAA is used, | |
6947 | * however in practice 16x4 seems fastest. | |
6948 | * | |
6949 | * Note that PS/WM thread counts depend on the WIZ hashing | |
6950 | * disable bit, which we don't touch here, but it's good | |
6951 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |
6952 | */ | |
6953 | I915_WRITE(GEN7_GT_MODE, | |
6954 | _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); | |
6955 | ||
6956 | /* | |
6957 | * WaIncreaseL3CreditsForVLVB0:vlv | |
6958 | * This is the hardware default actually. | |
6959 | */ | |
6960 | I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); | |
6961 | ||
6962 | /* | |
6963 | * WaDisableVLVClockGating_VBIIssue:vlv | |
6964 | * Disable clock gating on the GCFG unit to prevent a delay | |
6965 | * in the reporting of vblank events. | |
6966 | */ | |
6967 | I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); | |
6968 | } | |
6969 | ||
6970 | static void cherryview_init_clock_gating(struct drm_device *dev) | |
6971 | { | |
6972 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6973 | ||
6974 | vlv_init_display_clock_gating(dev_priv); | |
6975 | ||
6976 | /* WaVSRefCountFullforceMissDisable:chv */ | |
6977 | /* WaDSRefCountFullforceMissDisable:chv */ | |
6978 | I915_WRITE(GEN7_FF_THREAD_MODE, | |
6979 | I915_READ(GEN7_FF_THREAD_MODE) & | |
6980 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | |
6981 | ||
6982 | /* WaDisableSemaphoreAndSyncFlipWait:chv */ | |
6983 | I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, | |
6984 | _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); | |
6985 | ||
6986 | /* WaDisableCSUnitClockGating:chv */ | |
6987 | I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | | |
6988 | GEN6_CSUNIT_CLOCK_GATE_DISABLE); | |
6989 | ||
6990 | /* WaDisableSDEUnitClockGating:chv */ | |
6991 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |
6992 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |
6993 | ||
6994 | /* | |
6995 | * GTT cache may not work with big pages, so if those | |
6996 | * are ever enabled GTT cache may need to be disabled. | |
6997 | */ | |
6998 | I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); | |
6999 | } | |
7000 | ||
7001 | static void g4x_init_clock_gating(struct drm_device *dev) | |
7002 | { | |
7003 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7004 | uint32_t dspclk_gate; | |
7005 | ||
7006 | I915_WRITE(RENCLK_GATE_D1, 0); | |
7007 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | |
7008 | GS_UNIT_CLOCK_GATE_DISABLE | | |
7009 | CL_UNIT_CLOCK_GATE_DISABLE); | |
7010 | I915_WRITE(RAMCLK_GATE_D, 0); | |
7011 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | |
7012 | OVRUNIT_CLOCK_GATE_DISABLE | | |
7013 | OVCUNIT_CLOCK_GATE_DISABLE; | |
7014 | if (IS_GM45(dev)) | |
7015 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | |
7016 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | |
7017 | ||
7018 | /* WaDisableRenderCachePipelinedFlush */ | |
7019 | I915_WRITE(CACHE_MODE_0, | |
7020 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | |
7021 | ||
7022 | /* WaDisable_RenderCache_OperationalFlush:g4x */ | |
7023 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7024 | ||
7025 | g4x_disable_trickle_feed(dev); | |
7026 | } | |
7027 | ||
7028 | static void crestline_init_clock_gating(struct drm_device *dev) | |
7029 | { | |
7030 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7031 | ||
7032 | I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | |
7033 | I915_WRITE(RENCLK_GATE_D2, 0); | |
7034 | I915_WRITE(DSPCLK_GATE_D, 0); | |
7035 | I915_WRITE(RAMCLK_GATE_D, 0); | |
7036 | I915_WRITE16(DEUC, 0); | |
7037 | I915_WRITE(MI_ARB_STATE, | |
7038 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |
7039 | ||
7040 | /* WaDisable_RenderCache_OperationalFlush:gen4 */ | |
7041 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7042 | } | |
7043 | ||
7044 | static void broadwater_init_clock_gating(struct drm_device *dev) | |
7045 | { | |
7046 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7047 | ||
7048 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | |
7049 | I965_RCC_CLOCK_GATE_DISABLE | | |
7050 | I965_RCPB_CLOCK_GATE_DISABLE | | |
7051 | I965_ISC_CLOCK_GATE_DISABLE | | |
7052 | I965_FBC_CLOCK_GATE_DISABLE); | |
7053 | I915_WRITE(RENCLK_GATE_D2, 0); | |
7054 | I915_WRITE(MI_ARB_STATE, | |
7055 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |
7056 | ||
7057 | /* WaDisable_RenderCache_OperationalFlush:gen4 */ | |
7058 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); | |
7059 | } | |
7060 | ||
7061 | static void gen3_init_clock_gating(struct drm_device *dev) | |
7062 | { | |
7063 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7064 | u32 dstate = I915_READ(D_STATE); | |
7065 | ||
7066 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | |
7067 | DSTATE_DOT_CLOCK_GATING; | |
7068 | I915_WRITE(D_STATE, dstate); | |
7069 | ||
7070 | if (IS_PINEVIEW(dev)) | |
7071 | I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); | |
7072 | ||
7073 | /* IIR "flip pending" means done if this bit is set */ | |
7074 | I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); | |
7075 | ||
7076 | /* interrupts should cause a wake up from C3 */ | |
7077 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); | |
7078 | ||
7079 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | |
7080 | I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); | |
7081 | ||
7082 | I915_WRITE(MI_ARB_STATE, | |
7083 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |
7084 | } | |
7085 | ||
7086 | static void i85x_init_clock_gating(struct drm_device *dev) | |
7087 | { | |
7088 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7089 | ||
7090 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | |
7091 | ||
7092 | /* interrupts should cause a wake up from C3 */ | |
7093 | I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | | |
7094 | _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); | |
7095 | ||
7096 | I915_WRITE(MEM_MODE, | |
7097 | _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); | |
7098 | } | |
7099 | ||
7100 | static void i830_init_clock_gating(struct drm_device *dev) | |
7101 | { | |
7102 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7103 | ||
7104 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | |
7105 | ||
7106 | I915_WRITE(MEM_MODE, | |
7107 | _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | | |
7108 | _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); | |
7109 | } | |
7110 | ||
7111 | void intel_init_clock_gating(struct drm_device *dev) | |
7112 | { | |
7113 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7114 | ||
7115 | if (dev_priv->display.init_clock_gating) | |
7116 | dev_priv->display.init_clock_gating(dev); | |
7117 | } | |
7118 | ||
7119 | void intel_suspend_hw(struct drm_device *dev) | |
7120 | { | |
7121 | if (HAS_PCH_LPT(dev)) | |
7122 | lpt_suspend_hw(dev); | |
7123 | } | |
7124 | ||
7125 | /* Set up chip specific power management-related functions */ | |
7126 | void intel_init_pm(struct drm_device *dev) | |
7127 | { | |
7128 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7129 | ||
7130 | intel_fbc_init(dev_priv); | |
7131 | ||
7132 | /* For cxsr */ | |
7133 | if (IS_PINEVIEW(dev)) | |
7134 | i915_pineview_get_mem_freq(dev); | |
7135 | else if (IS_GEN5(dev)) | |
7136 | i915_ironlake_get_mem_freq(dev); | |
7137 | ||
7138 | /* For FIFO watermark updates */ | |
7139 | if (INTEL_INFO(dev)->gen >= 9) { | |
7140 | skl_setup_wm_latency(dev); | |
7141 | ||
7142 | if (IS_BROXTON(dev)) | |
7143 | dev_priv->display.init_clock_gating = | |
7144 | bxt_init_clock_gating; | |
7145 | else if (IS_SKYLAKE(dev)) | |
7146 | dev_priv->display.init_clock_gating = | |
7147 | skl_init_clock_gating; | |
7148 | dev_priv->display.update_wm = skl_update_wm; | |
7149 | dev_priv->display.update_sprite_wm = skl_update_sprite_wm; | |
7150 | } else if (HAS_PCH_SPLIT(dev)) { | |
7151 | ilk_setup_wm_latency(dev); | |
7152 | ||
7153 | if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && | |
7154 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || | |
7155 | (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && | |
7156 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { | |
7157 | dev_priv->display.update_wm = ilk_update_wm; | |
7158 | dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; | |
7159 | } else { | |
7160 | DRM_DEBUG_KMS("Failed to read display plane latency. " | |
7161 | "Disable CxSR\n"); | |
7162 | } | |
7163 | ||
7164 | if (IS_GEN5(dev)) | |
7165 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | |
7166 | else if (IS_GEN6(dev)) | |
7167 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | |
7168 | else if (IS_IVYBRIDGE(dev)) | |
7169 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | |
7170 | else if (IS_HASWELL(dev)) | |
7171 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; | |
7172 | else if (INTEL_INFO(dev)->gen == 8) | |
7173 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; | |
7174 | } else if (IS_CHERRYVIEW(dev)) { | |
7175 | vlv_setup_wm_latency(dev); | |
7176 | ||
7177 | dev_priv->display.update_wm = vlv_update_wm; | |
7178 | dev_priv->display.init_clock_gating = | |
7179 | cherryview_init_clock_gating; | |
7180 | } else if (IS_VALLEYVIEW(dev)) { | |
7181 | vlv_setup_wm_latency(dev); | |
7182 | ||
7183 | dev_priv->display.update_wm = vlv_update_wm; | |
7184 | dev_priv->display.init_clock_gating = | |
7185 | valleyview_init_clock_gating; | |
7186 | } else if (IS_PINEVIEW(dev)) { | |
7187 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | |
7188 | dev_priv->is_ddr3, | |
7189 | dev_priv->fsb_freq, | |
7190 | dev_priv->mem_freq)) { | |
7191 | DRM_INFO("failed to find known CxSR latency " | |
7192 | "(found ddr%s fsb freq %d, mem freq %d), " | |
7193 | "disabling CxSR\n", | |
7194 | (dev_priv->is_ddr3 == 1) ? "3" : "2", | |
7195 | dev_priv->fsb_freq, dev_priv->mem_freq); | |
7196 | /* Disable CxSR and never update its watermark again */ | |
7197 | intel_set_memory_cxsr(dev_priv, false); | |
7198 | dev_priv->display.update_wm = NULL; | |
7199 | } else | |
7200 | dev_priv->display.update_wm = pineview_update_wm; | |
7201 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | |
7202 | } else if (IS_G4X(dev)) { | |
7203 | dev_priv->display.update_wm = g4x_update_wm; | |
7204 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; | |
7205 | } else if (IS_GEN4(dev)) { | |
7206 | dev_priv->display.update_wm = i965_update_wm; | |
7207 | if (IS_CRESTLINE(dev)) | |
7208 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; | |
7209 | else if (IS_BROADWATER(dev)) | |
7210 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; | |
7211 | } else if (IS_GEN3(dev)) { | |
7212 | dev_priv->display.update_wm = i9xx_update_wm; | |
7213 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | |
7214 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | |
7215 | } else if (IS_GEN2(dev)) { | |
7216 | if (INTEL_INFO(dev)->num_pipes == 1) { | |
7217 | dev_priv->display.update_wm = i845_update_wm; | |
7218 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | |
7219 | } else { | |
7220 | dev_priv->display.update_wm = i9xx_update_wm; | |
7221 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | |
7222 | } | |
7223 | ||
7224 | if (IS_I85X(dev) || IS_I865G(dev)) | |
7225 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | |
7226 | else | |
7227 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | |
7228 | } else { | |
7229 | DRM_ERROR("unexpected fall-through in intel_init_pm\n"); | |
7230 | } | |
7231 | } | |
7232 | ||
7233 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) | |
7234 | { | |
7235 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
7236 | ||
7237 | if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { | |
7238 | DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); | |
7239 | return -EAGAIN; | |
7240 | } | |
7241 | ||
7242 | I915_WRITE(GEN6_PCODE_DATA, *val); | |
7243 | I915_WRITE(GEN6_PCODE_DATA1, 0); | |
7244 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | |
7245 | ||
7246 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | |
7247 | 500)) { | |
7248 | DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); | |
7249 | return -ETIMEDOUT; | |
7250 | } | |
7251 | ||
7252 | *val = I915_READ(GEN6_PCODE_DATA); | |
7253 | I915_WRITE(GEN6_PCODE_DATA, 0); | |
7254 | ||
7255 | return 0; | |
7256 | } | |
7257 | ||
7258 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) | |
7259 | { | |
7260 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
7261 | ||
7262 | if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { | |
7263 | DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); | |
7264 | return -EAGAIN; | |
7265 | } | |
7266 | ||
7267 | I915_WRITE(GEN6_PCODE_DATA, val); | |
7268 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | |
7269 | ||
7270 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | |
7271 | 500)) { | |
7272 | DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); | |
7273 | return -ETIMEDOUT; | |
7274 | } | |
7275 | ||
7276 | I915_WRITE(GEN6_PCODE_DATA, 0); | |
7277 | ||
7278 | return 0; | |
7279 | } | |
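 | ||
 | /* | |
 | * Minimal usage sketch (illustrative only): both pcode helpers expect | |
 | * rps.hw_lock to be held and their return value to be checked. Note | |
 | * that for reads, *val is written into GEN6_PCODE_DATA first, so it | |
 | * doubles as a request parameter for commands that take one: | |
 | * | |
 | *     u32 val = 0; | |
 | *     int ret; | |
 | * | |
 | *     mutex_lock(&dev_priv->rps.hw_lock); | |
 | *     ret = sandybridge_pcode_read(dev_priv, | |
 | *                                  GEN6_PCODE_READ_MIN_FREQ_TABLE, &val); | |
 | *     mutex_unlock(&dev_priv->rps.hw_lock); | |
 | *     if (ret) | |
 | *             DRM_DEBUG_DRIVER("pcode read failed: %d\n", ret); | |
 | * | |
 | * GEN6_PCODE_READ_MIN_FREQ_TABLE is just one example mailbox command. | |
 | */ | |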
7280 | ||
7281 | static int vlv_gpu_freq_div(unsigned int czclk_freq) | |
7282 | { | |
7283 | switch (czclk_freq) { | |
7284 | case 200: | |
7285 | return 10; | |
7286 | case 267: | |
7287 | return 12; | |
7288 | case 320: | |
7289 | case 333: | |
7290 | return 16; | |
7291 | case 400: | |
7292 | return 20; | |
7293 | default: | |
7294 | return -1; | |
7295 | } | |
7296 | } | |
7297 | ||
7298 | static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) | |
7299 | { | |
7300 | int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); | |
7301 | ||
7302 | div = vlv_gpu_freq_div(czclk_freq); | |
7303 | if (div < 0) | |
7304 | return div; | |
7305 | ||
7306 | return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div); | |
7307 | } | |
7308 | ||
7309 | static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) | |
7310 | { | |
7311 | int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); | |
7312 | ||
7313 | mul = vlv_gpu_freq_div(czclk_freq); | |
7314 | if (mul < 0) | |
7315 | return mul; | |
7316 | ||
7317 | return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6; | |
7318 | } | |
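 | ||
 | /* | |
 | * Worked example (illustrative only): with a 320 MHz CZ clock, | |
 | * vlv_gpu_freq_div() returns 16, so a PCU opcode of 0xd2 decodes as | |
 | * | |
 | *     320 * (0xd2 + 6 - 0xbd) / 16 = 320 * 27 / 16 = 540 MHz | |
 | * | |
 | * and byt_freq_opcode(dev_priv, 540) performs the inverse mapping, | |
 | * yielding 0xd2 again. | |
 | */ | |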
7319 | ||
7320 | static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) | |
7321 | { | |
7322 | int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); | |
7323 | ||
7324 | div = vlv_gpu_freq_div(czclk_freq) / 2; | |
7325 | if (div < 0) | |
7326 | return div; | |
7327 | ||
7328 | return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; | |
7329 | } | |
7330 | ||
7331 | static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) | |
7332 | { | |
7333 | int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); | |
7334 | ||
7335 | mul = vlv_gpu_freq_div(czclk_freq) / 2; | |
7336 | if (mul < 0) | |
7337 | return mul; | |
7338 | ||
7339 | /* CHV needs even values */ | |
7340 | return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; | |
7341 | } | |
7342 | ||
7343 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | |
7344 | { | |
7345 | if (IS_GEN9(dev_priv->dev)) | |
7346 | return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; | |
7347 | else if (IS_CHERRYVIEW(dev_priv->dev)) | |
7348 | return chv_gpu_freq(dev_priv, val); | |
7349 | else if (IS_VALLEYVIEW(dev_priv->dev)) | |
7350 | return byt_gpu_freq(dev_priv, val); | |
7351 | else | |
7352 | return val * GT_FREQUENCY_MULTIPLIER; | |
7353 | } | |
7354 | ||
7355 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) | |
7356 | { | |
7357 | if (IS_GEN9(dev_priv->dev)) | |
7358 | return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; | |
7359 | else if (IS_CHERRYVIEW(dev_priv->dev)) | |
7360 | return chv_freq_opcode(dev_priv, val); | |
7361 | else if (IS_VALLEYVIEW(dev_priv->dev)) | |
7362 | return byt_freq_opcode(dev_priv, val); | |
7363 | else | |
7364 | return val / GT_FREQUENCY_MULTIPLIER; | |
7365 | } | |
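 | ||
 | /* | |
 | * Illustrative example, assuming GT_FREQUENCY_MULTIPLIER == 50 and | |
 | * GEN9_FREQ_SCALER == 3 as defined elsewhere in the driver: on SKL a | |
 | * hardware ratio of 36 corresponds to 36 * 50 / 3 = 600 MHz, while | |
 | * pre-gen9 platforms use plain 50 MHz steps (ratio 12 -> 600 MHz). | |
 | */ | |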
7366 | ||
7367 | struct request_boost { | |
7368 | struct work_struct work; | |
7369 | struct drm_i915_gem_request *req; | |
7370 | }; | |
7371 | ||
7372 | static void __intel_rps_boost_work(struct work_struct *work) | |
7373 | { | |
7374 | struct request_boost *boost = container_of(work, struct request_boost, work); | |
7375 | struct drm_i915_gem_request *req = boost->req; | |
7376 | ||
7377 | if (!i915_gem_request_completed(req, true)) | |
7378 | gen6_rps_boost(to_i915(req->ring->dev), NULL, | |
7379 | req->emitted_jiffies); | |
7380 | ||
7381 | i915_gem_request_unreference__unlocked(req); | |
7382 | kfree(boost); | |
7383 | } | |
7384 | ||
7385 | void intel_queue_rps_boost_for_request(struct drm_device *dev, | |
7386 | struct drm_i915_gem_request *req) | |
7387 | { | |
7388 | struct request_boost *boost; | |
7389 | ||
7390 | if (req == NULL || INTEL_INFO(dev)->gen < 6) | |
7391 | return; | |
7392 | ||
7393 | if (i915_gem_request_completed(req, true)) | |
7394 | return; | |
7395 | ||
7396 | boost = kmalloc(sizeof(*boost), GFP_ATOMIC); | |
7397 | if (boost == NULL) | |
7398 | return; | |
7399 | ||
7400 | i915_gem_request_reference(req); | |
7401 | boost->req = req; | |
7402 | ||
7403 | INIT_WORK(&boost->work, __intel_rps_boost_work); | |
7404 | queue_work(to_i915(dev)->wq, &boost->work); | |
7405 | } | |
7406 | ||
7407 | void intel_pm_setup(struct drm_device *dev) | |
7408 | { | |
7409 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7410 | ||
7411 | mutex_init(&dev_priv->rps.hw_lock); | |
7412 | spin_lock_init(&dev_priv->rps.client_lock); | |
7413 | ||
7414 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | |
7415 | intel_gen6_powersave_work); | |
7416 | INIT_LIST_HEAD(&dev_priv->rps.clients); | |
7417 | INIT_LIST_HEAD(&dev_priv->rps.semaphores.link); | |
7418 | INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); | |
7419 | ||
7420 | dev_priv->pm.suspended = false; | |
7421 | } |