drivers/gpu/drm/i915/intel_pm.c
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
 28#include <linux/cpufreq.h>
 29#include "i915_drv.h"
 30#include "intel_drv.h"
 31#include "../../../platform/x86/intel_ips.h"
 32#include <linux/module.h>
 33#include <drm/i915_powerwell.h>
 34
 35/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 36 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 37 * during in-memory transfers and, therefore, reduce the power consumption.
 38 *
 39 * The benefits of FBC are mostly visible with solid backgrounds and
 40 * variation-less patterns.
 41 *
 42 * FBC-related functionality can be enabled by means of the
 43 * i915.i915_enable_fbc parameter
 44 */
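/* The i8xx/g4x/ironlake/gen7 enable/disable/enabled routines below are
 * selected per platform elsewhere in this file and reached through the
 * dev_priv->display.enable_fbc, disable_fbc and fbc_enabled hooks;
 * intel_update_fbc() decides at modeset time whether the current
 * configuration allows compression at all.
 */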
45
 46static void i8xx_disable_fbc(struct drm_device *dev)
47{
48 struct drm_i915_private *dev_priv = dev->dev_private;
49 u32 fbc_ctl;
50
51 /* Disable compression */
52 fbc_ctl = I915_READ(FBC_CONTROL);
53 if ((fbc_ctl & FBC_CTL_EN) == 0)
54 return;
55
56 fbc_ctl &= ~FBC_CTL_EN;
57 I915_WRITE(FBC_CONTROL, fbc_ctl);
58
59 /* Wait for compressing bit to clear */
60 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
61 DRM_DEBUG_KMS("FBC idle timed out\n");
62 return;
63 }
64
65 DRM_DEBUG_KMS("disabled FBC\n");
66}
67
 68static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
69{
70 struct drm_device *dev = crtc->dev;
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 struct drm_framebuffer *fb = crtc->fb;
73 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
74 struct drm_i915_gem_object *obj = intel_fb->obj;
75 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
76 int cfb_pitch;
77 int plane, i;
78 u32 fbc_ctl, fbc_ctl2;
79
 80 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
81 if (fb->pitches[0] < cfb_pitch)
82 cfb_pitch = fb->pitches[0];
83
84 /* FBC_CTL wants 64B units */
85 cfb_pitch = (cfb_pitch / 64) - 1;
86 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
87
88 /* Clear old tags */
89 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
90 I915_WRITE(FBC_TAG + (i * 4), 0);
91
92 /* Set it up... */
93 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
94 fbc_ctl2 |= plane;
95 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
96 I915_WRITE(FBC_FENCE_OFF, crtc->y);
97
98 /* enable it... */
99 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
100 if (IS_I945GM(dev))
101 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
102 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
103 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
104 fbc_ctl |= obj->fence_reg;
105 I915_WRITE(FBC_CONTROL, fbc_ctl);
106
 107 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
 108 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
109}
110
 111static bool i8xx_fbc_enabled(struct drm_device *dev)
112{
113 struct drm_i915_private *dev_priv = dev->dev_private;
114
115 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
116}
117
 118static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
119{
120 struct drm_device *dev = crtc->dev;
121 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct drm_framebuffer *fb = crtc->fb;
123 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
124 struct drm_i915_gem_object *obj = intel_fb->obj;
125 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
126 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
127 unsigned long stall_watermark = 200;
128 u32 dpfc_ctl;
129
130 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
131 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
132 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
133
134 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
135 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
136 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
137 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
138
139 /* enable it... */
140 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
141
 142 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
143}
144
 145static void g4x_disable_fbc(struct drm_device *dev)
146{
147 struct drm_i915_private *dev_priv = dev->dev_private;
148 u32 dpfc_ctl;
149
150 /* Disable compression */
151 dpfc_ctl = I915_READ(DPFC_CONTROL);
152 if (dpfc_ctl & DPFC_CTL_EN) {
153 dpfc_ctl &= ~DPFC_CTL_EN;
154 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
155
156 DRM_DEBUG_KMS("disabled FBC\n");
157 }
158}
159
 160static bool g4x_fbc_enabled(struct drm_device *dev)
161{
162 struct drm_i915_private *dev_priv = dev->dev_private;
163
164 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
165}
166
167static void sandybridge_blit_fbc_update(struct drm_device *dev)
168{
169 struct drm_i915_private *dev_priv = dev->dev_private;
170 u32 blt_ecoskpd;
171
172 /* Make sure blitter notifies FBC of writes */
173 gen6_gt_force_wake_get(dev_priv);
174 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
175 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
176 GEN6_BLITTER_LOCK_SHIFT;
177 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
178 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
179 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
180 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
181 GEN6_BLITTER_LOCK_SHIFT);
182 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
183 POSTING_READ(GEN6_BLITTER_ECOSKPD);
184 gen6_gt_force_wake_put(dev_priv);
185}
186
 187static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
188{
189 struct drm_device *dev = crtc->dev;
190 struct drm_i915_private *dev_priv = dev->dev_private;
191 struct drm_framebuffer *fb = crtc->fb;
192 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
193 struct drm_i915_gem_object *obj = intel_fb->obj;
194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
195 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
196 unsigned long stall_watermark = 200;
197 u32 dpfc_ctl;
198
199 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
200 dpfc_ctl &= DPFC_RESERVED;
201 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
202 /* Set persistent mode for front-buffer rendering, ala X. */
203 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
204 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
205 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
206
207 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
208 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
209 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
210 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
 211 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
212 /* enable it... */
213 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
214
215 if (IS_GEN6(dev)) {
216 I915_WRITE(SNB_DPFC_CTL_SA,
217 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
218 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
219 sandybridge_blit_fbc_update(dev);
220 }
221
 222 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
223}
224
 225static void ironlake_disable_fbc(struct drm_device *dev)
226{
227 struct drm_i915_private *dev_priv = dev->dev_private;
228 u32 dpfc_ctl;
229
230 /* Disable compression */
231 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
232 if (dpfc_ctl & DPFC_CTL_EN) {
233 dpfc_ctl &= ~DPFC_CTL_EN;
234 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
235
 236 if (IS_IVYBRIDGE(dev))
 237 /* WaFbcDisableDpfcClockGating:ivb */
 238 I915_WRITE(ILK_DSPCLK_GATE_D,
 239 I915_READ(ILK_DSPCLK_GATE_D) &
 240 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
 241
 242 if (IS_HASWELL(dev))
 243 /* WaFbcDisableDpfcClockGating:hsw */
244 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
245 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
246 ~HSW_DPFC_GATING_DISABLE);
247
248 DRM_DEBUG_KMS("disabled FBC\n");
249 }
250}
251
 252static bool ironlake_fbc_enabled(struct drm_device *dev)
253{
254 struct drm_i915_private *dev_priv = dev->dev_private;
255
256 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
257}
258
259static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
260{
261 struct drm_device *dev = crtc->dev;
262 struct drm_i915_private *dev_priv = dev->dev_private;
263 struct drm_framebuffer *fb = crtc->fb;
264 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
265 struct drm_i915_gem_object *obj = intel_fb->obj;
266 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
267
 268 I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
269
270 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
271 IVB_DPFC_CTL_FENCE_EN |
272 intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
273
 274 if (IS_IVYBRIDGE(dev)) {
 275 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
 276 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
 277 /* WaFbcDisableDpfcClockGating:ivb */
 278 I915_WRITE(ILK_DSPCLK_GATE_D,
 279 I915_READ(ILK_DSPCLK_GATE_D) |
 280 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
 281 } else {
 282 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
 283 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
 284 HSW_BYPASS_FBC_QUEUE);
 285 /* WaFbcDisableDpfcClockGating:hsw */
 286 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
 287 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
 288 HSW_DPFC_GATING_DISABLE);
 289 }
 290
291 I915_WRITE(SNB_DPFC_CTL_SA,
292 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
293 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
294
295 sandybridge_blit_fbc_update(dev);
296
297 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
298}
299
300bool intel_fbc_enabled(struct drm_device *dev)
301{
302 struct drm_i915_private *dev_priv = dev->dev_private;
303
304 if (!dev_priv->display.fbc_enabled)
305 return false;
306
307 return dev_priv->display.fbc_enabled(dev);
308}
309
310static void intel_fbc_work_fn(struct work_struct *__work)
311{
312 struct intel_fbc_work *work =
313 container_of(to_delayed_work(__work),
314 struct intel_fbc_work, work);
315 struct drm_device *dev = work->crtc->dev;
316 struct drm_i915_private *dev_priv = dev->dev_private;
317
318 mutex_lock(&dev->struct_mutex);
 319 if (work == dev_priv->fbc.fbc_work) {
320 /* Double check that we haven't switched fb without cancelling
321 * the prior work.
322 */
323 if (work->crtc->fb == work->fb) {
324 dev_priv->display.enable_fbc(work->crtc,
325 work->interval);
326
 327 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
 328 dev_priv->fbc.fb_id = work->crtc->fb->base.id;
 329 dev_priv->fbc.y = work->crtc->y;
330 }
331
 332 dev_priv->fbc.fbc_work = NULL;
333 }
334 mutex_unlock(&dev->struct_mutex);
335
336 kfree(work);
337}
338
339static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
340{
 341 if (dev_priv->fbc.fbc_work == NULL)
342 return;
343
344 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
345
346 /* Synchronisation is provided by struct_mutex and checking of
 347 * dev_priv->fbc.fbc_work, so we can perform the cancellation
348 * entirely asynchronously.
349 */
 350 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
 351 /* tasklet was killed before being run, clean up */
 352 kfree(dev_priv->fbc.fbc_work);
353
354 /* Mark the work as no longer wanted so that if it does
 355 * wake up (because the work was already running and waiting
 356 * for our mutex), it will discover that it is no longer
357 * necessary to run.
358 */
 359 dev_priv->fbc.fbc_work = NULL;
360}
361
 362static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
363{
364 struct intel_fbc_work *work;
365 struct drm_device *dev = crtc->dev;
366 struct drm_i915_private *dev_priv = dev->dev_private;
367
368 if (!dev_priv->display.enable_fbc)
369 return;
370
371 intel_cancel_fbc_work(dev_priv);
372
 373 work = kzalloc(sizeof(*work), GFP_KERNEL);
 374 if (work == NULL) {
 375 DRM_ERROR("Failed to allocate FBC work structure\n");
376 dev_priv->display.enable_fbc(crtc, interval);
377 return;
378 }
379
380 work->crtc = crtc;
381 work->fb = crtc->fb;
382 work->interval = interval;
383 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
384
 385 dev_priv->fbc.fbc_work = work;
 386
387 /* Delay the actual enabling to let pageflipping cease and the
388 * display to settle before starting the compression. Note that
389 * this delay also serves a second purpose: it allows for a
390 * vblank to pass after disabling the FBC before we attempt
391 * to modify the control registers.
392 *
393 * A more complicated solution would involve tracking vblanks
394 * following the termination of the page-flipping sequence
395 * and indeed performing the enable as a co-routine and not
396 * waiting synchronously upon the vblank.
 397 *
 398 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
399 */
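/* Note: 50 ms is longer than one frame period at any refresh rate above
 * 20 Hz, so by the time the deferred work runs at least one vblank
 * should normally have passed on a running pipe.
 */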
400 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
401}
402
403void intel_disable_fbc(struct drm_device *dev)
404{
405 struct drm_i915_private *dev_priv = dev->dev_private;
406
407 intel_cancel_fbc_work(dev_priv);
408
409 if (!dev_priv->display.disable_fbc)
410 return;
411
412 dev_priv->display.disable_fbc(dev);
 413 dev_priv->fbc.plane = -1;
414}
415
416static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
417 enum no_fbc_reason reason)
418{
419 if (dev_priv->fbc.no_fbc_reason == reason)
420 return false;
421
422 dev_priv->fbc.no_fbc_reason = reason;
423 return true;
424}
425
426/**
427 * intel_update_fbc - enable/disable FBC as needed
428 * @dev: the drm_device
429 *
430 * Set up the framebuffer compression hardware at mode set time. We
431 * enable it if possible:
432 * - plane A only (on pre-965)
 433 * - no pixel multiply/line duplication
434 * - no alpha buffer discard
435 * - no dual wide
 436 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
437 *
438 * We can't assume that any compression will take place (worst case),
439 * so the compressed buffer has to be the same size as the uncompressed
440 * one. It also must reside (along with the line length buffer) in
441 * stolen memory.
442 *
443 * We need to enable/disable FBC on a global basis.
444 */
445void intel_update_fbc(struct drm_device *dev)
446{
447 struct drm_i915_private *dev_priv = dev->dev_private;
448 struct drm_crtc *crtc = NULL, *tmp_crtc;
449 struct intel_crtc *intel_crtc;
450 struct drm_framebuffer *fb;
451 struct intel_framebuffer *intel_fb;
452 struct drm_i915_gem_object *obj;
 453 const struct drm_display_mode *adjusted_mode;
 454 unsigned int max_width, max_height;
 455
456 if (!I915_HAS_FBC(dev)) {
457 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
 458 return;
 459 }
 460
461 if (!i915_powersave) {
462 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
463 DRM_DEBUG_KMS("fbc disabled per module param\n");
 464 return;
 465 }
466
467 /*
468 * If FBC is already on, we just have to verify that we can
469 * keep it that way...
470 * Need to disable if:
471 * - more than one pipe is active
472 * - changing FBC params (stride, fence, mode)
473 * - new fb is too large to fit in compressed buffer
474 * - going to an unsupported config (interlace, pixel multiply, etc.)
475 */
476 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
 477 if (intel_crtc_active(tmp_crtc) &&
 478 to_intel_crtc(tmp_crtc)->primary_enabled) {
 479 if (crtc) {
480 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
481 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
482 goto out_disable;
483 }
484 crtc = tmp_crtc;
485 }
486 }
487
488 if (!crtc || crtc->fb == NULL) {
489 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
490 DRM_DEBUG_KMS("no output, disabling\n");
491 goto out_disable;
492 }
493
494 intel_crtc = to_intel_crtc(crtc);
495 fb = crtc->fb;
496 intel_fb = to_intel_framebuffer(fb);
497 obj = intel_fb->obj;
 498 adjusted_mode = &intel_crtc->config.adjusted_mode;
 499
500 if (i915_enable_fbc < 0 &&
501 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
502 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
503 DRM_DEBUG_KMS("disabled per chip default\n");
 504 goto out_disable;
 505 }
 506 if (!i915_enable_fbc) {
507 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
508 DRM_DEBUG_KMS("fbc disabled per module param\n");
509 goto out_disable;
510 }
511 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
512 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
513 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
514 DRM_DEBUG_KMS("mode incompatible with compression, "
515 "disabling\n");
516 goto out_disable;
517 }
 518
 519 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
520 max_width = 4096;
521 max_height = 2048;
 522 } else {
523 max_width = 2048;
524 max_height = 1536;
 525 }
526 if (intel_crtc->config.pipe_src_w > max_width ||
527 intel_crtc->config.pipe_src_h > max_height) {
528 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
529 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
530 goto out_disable;
531 }
532 if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
533 intel_crtc->plane != 0) {
534 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
535 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
536 goto out_disable;
537 }
538
539 /* The use of a CPU fence is mandatory in order to detect writes
540 * by the CPU to the scanout and trigger updates to the FBC.
541 */
542 if (obj->tiling_mode != I915_TILING_X ||
543 obj->fence_reg == I915_FENCE_REG_NONE) {
544 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
545 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
546 goto out_disable;
547 }
548
549 /* If the kernel debugger is active, always disable compression */
550 if (in_dbg_master())
551 goto out_disable;
552
 553 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
554 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
555 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
556 goto out_disable;
557 }
558
559 /* If the scanout has not changed, don't modify the FBC settings.
560 * Note that we make the fundamental assumption that the fb->obj
561 * cannot be unpinned (and have its GTT offset and fence revoked)
562 * without first being decoupled from the scanout and FBC disabled.
563 */
564 if (dev_priv->fbc.plane == intel_crtc->plane &&
565 dev_priv->fbc.fb_id == fb->base.id &&
566 dev_priv->fbc.y == crtc->y)
567 return;
568
569 if (intel_fbc_enabled(dev)) {
570 /* We update FBC along two paths, after changing fb/crtc
571 * configuration (modeswitching) and after page-flipping
572 * finishes. For the latter, we know that not only did
573 * we disable the FBC at the start of the page-flip
574 * sequence, but also more than one vblank has passed.
575 *
576 * For the former case of modeswitching, it is possible
577 * to switch between two FBC valid configurations
578 * instantaneously so we do need to disable the FBC
579 * before we can modify its control registers. We also
580 * have to wait for the next vblank for that to take
581 * effect. However, since we delay enabling FBC we can
582 * assume that a vblank has passed since disabling and
583 * that we can safely alter the registers in the deferred
584 * callback.
585 *
586 * In the scenario that we go from a valid to invalid
587 * and then back to valid FBC configuration we have
588 * no strict enforcement that a vblank occurred since
589 * disabling the FBC. However, along all current pipe
590 * disabling paths we do need to wait for a vblank at
591 * some point. And we wait before enabling FBC anyway.
592 */
593 DRM_DEBUG_KMS("disabling active FBC for update\n");
594 intel_disable_fbc(dev);
595 }
596
597 intel_enable_fbc(crtc, 500);
 598 dev_priv->fbc.no_fbc_reason = FBC_OK;
599 return;
600
601out_disable:
602 /* Multiple disables should be harmless */
603 if (intel_fbc_enabled(dev)) {
604 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
605 intel_disable_fbc(dev);
606 }
 607 i915_gem_stolen_cleanup_compression(dev);
608}
609
610static void i915_pineview_get_mem_freq(struct drm_device *dev)
611{
612 drm_i915_private_t *dev_priv = dev->dev_private;
613 u32 tmp;
614
615 tmp = I915_READ(CLKCFG);
616
617 switch (tmp & CLKCFG_FSB_MASK) {
618 case CLKCFG_FSB_533:
619 dev_priv->fsb_freq = 533; /* 133*4 */
620 break;
621 case CLKCFG_FSB_800:
622 dev_priv->fsb_freq = 800; /* 200*4 */
623 break;
624 case CLKCFG_FSB_667:
625 dev_priv->fsb_freq = 667; /* 167*4 */
626 break;
627 case CLKCFG_FSB_400:
628 dev_priv->fsb_freq = 400; /* 100*4 */
629 break;
630 }
631
632 switch (tmp & CLKCFG_MEM_MASK) {
633 case CLKCFG_MEM_533:
634 dev_priv->mem_freq = 533;
635 break;
636 case CLKCFG_MEM_667:
637 dev_priv->mem_freq = 667;
638 break;
639 case CLKCFG_MEM_800:
640 dev_priv->mem_freq = 800;
641 break;
642 }
643
644 /* detect pineview DDR3 setting */
645 tmp = I915_READ(CSHRDDR3CTL);
646 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
647}
648
649static void i915_ironlake_get_mem_freq(struct drm_device *dev)
650{
651 drm_i915_private_t *dev_priv = dev->dev_private;
652 u16 ddrpll, csipll;
653
654 ddrpll = I915_READ16(DDRMPLL1);
655 csipll = I915_READ16(CSIPLL0);
656
657 switch (ddrpll & 0xff) {
658 case 0xc:
659 dev_priv->mem_freq = 800;
660 break;
661 case 0x10:
662 dev_priv->mem_freq = 1066;
663 break;
664 case 0x14:
665 dev_priv->mem_freq = 1333;
666 break;
667 case 0x18:
668 dev_priv->mem_freq = 1600;
669 break;
670 default:
671 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
672 ddrpll & 0xff);
673 dev_priv->mem_freq = 0;
674 break;
675 }
676
 677 dev_priv->ips.r_t = dev_priv->mem_freq;
678
679 switch (csipll & 0x3ff) {
680 case 0x00c:
681 dev_priv->fsb_freq = 3200;
682 break;
683 case 0x00e:
684 dev_priv->fsb_freq = 3733;
685 break;
686 case 0x010:
687 dev_priv->fsb_freq = 4266;
688 break;
689 case 0x012:
690 dev_priv->fsb_freq = 4800;
691 break;
692 case 0x014:
693 dev_priv->fsb_freq = 5333;
694 break;
695 case 0x016:
696 dev_priv->fsb_freq = 5866;
697 break;
698 case 0x018:
699 dev_priv->fsb_freq = 6400;
700 break;
701 default:
702 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
703 csipll & 0x3ff);
704 dev_priv->fsb_freq = 0;
705 break;
706 }
707
708 if (dev_priv->fsb_freq == 3200) {
 709 dev_priv->ips.c_m = 0;
 710 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
 711 dev_priv->ips.c_m = 1;
 712 } else {
 713 dev_priv->ips.c_m = 2;
714 }
715}
716
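/* Each cxsr_latency entry below pairs a desktop/mobile flag and a DDR3
 * flag with an FSB/memory frequency combination (in MHz) and the
 * corresponding self-refresh and HPLL-off latencies, in nanoseconds,
 * consumed by pineview_update_wm() for the display and cursor watermarks.
 */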
717static const struct cxsr_latency cxsr_latency_table[] = {
718 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
719 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
720 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
721 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
722 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
723
724 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
725 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
726 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
727 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
728 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
729
730 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
731 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
732 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
733 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
734 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
735
736 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
737 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
738 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
739 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
740 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
741
742 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
743 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
744 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
745 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
746 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
747
748 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
749 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
750 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
751 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
752 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
753};
754
 755static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
756 int is_ddr3,
757 int fsb,
758 int mem)
759{
760 const struct cxsr_latency *latency;
761 int i;
762
763 if (fsb == 0 || mem == 0)
764 return NULL;
765
766 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
767 latency = &cxsr_latency_table[i];
768 if (is_desktop == latency->is_desktop &&
769 is_ddr3 == latency->is_ddr3 &&
770 fsb == latency->fsb_freq && mem == latency->mem_freq)
771 return latency;
772 }
773
774 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
775
776 return NULL;
777}
778
 779static void pineview_disable_cxsr(struct drm_device *dev)
780{
781 struct drm_i915_private *dev_priv = dev->dev_private;
782
783 /* deactivate cxsr */
784 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
785}
786
787/*
788 * Latency for FIFO fetches is dependent on several factors:
789 * - memory configuration (speed, channels)
790 * - chipset
791 * - current MCH state
792 * It can be fairly high in some situations, so here we assume a fairly
793 * pessimal value. It's a tradeoff between extra memory fetches (if we
794 * set this value too high, the FIFO will fetch frequently to stay full)
795 * and power consumption (set it too low to save power and we might see
796 * FIFO underruns and display "flicker").
797 *
798 * A value of 5us seems to be a good balance; safe for very low end
799 * platforms but not overly aggressive on lower latency configs.
800 */
801static const int latency_ns = 5000;
802
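/* The *_get_fifo_size() helpers below decode the DSPARB FIFO split: the
 * low field is plane A's allocation, plane B's share (where supported) is
 * the difference up to the next boundary field, and some variants shift
 * the raw value down to convert it to cachelines.
 */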
 803static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
804{
805 struct drm_i915_private *dev_priv = dev->dev_private;
806 uint32_t dsparb = I915_READ(DSPARB);
807 int size;
808
809 size = dsparb & 0x7f;
810 if (plane)
811 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
812
813 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
814 plane ? "B" : "A", size);
815
816 return size;
817}
818
 819static int i85x_get_fifo_size(struct drm_device *dev, int plane)
820{
821 struct drm_i915_private *dev_priv = dev->dev_private;
822 uint32_t dsparb = I915_READ(DSPARB);
823 int size;
824
825 size = dsparb & 0x1ff;
826 if (plane)
827 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
828 size >>= 1; /* Convert to cachelines */
829
830 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
831 plane ? "B" : "A", size);
832
833 return size;
834}
835
 836static int i845_get_fifo_size(struct drm_device *dev, int plane)
837{
838 struct drm_i915_private *dev_priv = dev->dev_private;
839 uint32_t dsparb = I915_READ(DSPARB);
840 int size;
841
842 size = dsparb & 0x7f;
843 size >>= 2; /* Convert to cachelines */
844
845 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
846 plane ? "B" : "A",
847 size);
848
849 return size;
850}
851
 852static int i830_get_fifo_size(struct drm_device *dev, int plane)
853{
854 struct drm_i915_private *dev_priv = dev->dev_private;
855 uint32_t dsparb = I915_READ(DSPARB);
856 int size;
857
858 size = dsparb & 0x7f;
859 size >>= 1; /* Convert to cachelines */
860
861 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
862 plane ? "B" : "A", size);
863
864 return size;
865}
866
867/* Pineview has different values for various configs */
868static const struct intel_watermark_params pineview_display_wm = {
869 PINEVIEW_DISPLAY_FIFO,
870 PINEVIEW_MAX_WM,
871 PINEVIEW_DFT_WM,
872 PINEVIEW_GUARD_WM,
873 PINEVIEW_FIFO_LINE_SIZE
874};
875static const struct intel_watermark_params pineview_display_hplloff_wm = {
876 PINEVIEW_DISPLAY_FIFO,
877 PINEVIEW_MAX_WM,
878 PINEVIEW_DFT_HPLLOFF_WM,
879 PINEVIEW_GUARD_WM,
880 PINEVIEW_FIFO_LINE_SIZE
881};
882static const struct intel_watermark_params pineview_cursor_wm = {
883 PINEVIEW_CURSOR_FIFO,
884 PINEVIEW_CURSOR_MAX_WM,
885 PINEVIEW_CURSOR_DFT_WM,
886 PINEVIEW_CURSOR_GUARD_WM,
887 PINEVIEW_FIFO_LINE_SIZE,
888};
889static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
890 PINEVIEW_CURSOR_FIFO,
891 PINEVIEW_CURSOR_MAX_WM,
892 PINEVIEW_CURSOR_DFT_WM,
893 PINEVIEW_CURSOR_GUARD_WM,
894 PINEVIEW_FIFO_LINE_SIZE
895};
896static const struct intel_watermark_params g4x_wm_info = {
897 G4X_FIFO_SIZE,
898 G4X_MAX_WM,
899 G4X_MAX_WM,
900 2,
901 G4X_FIFO_LINE_SIZE,
902};
903static const struct intel_watermark_params g4x_cursor_wm_info = {
904 I965_CURSOR_FIFO,
905 I965_CURSOR_MAX_WM,
906 I965_CURSOR_DFT_WM,
907 2,
908 G4X_FIFO_LINE_SIZE,
909};
910static const struct intel_watermark_params valleyview_wm_info = {
911 VALLEYVIEW_FIFO_SIZE,
912 VALLEYVIEW_MAX_WM,
913 VALLEYVIEW_MAX_WM,
914 2,
915 G4X_FIFO_LINE_SIZE,
916};
917static const struct intel_watermark_params valleyview_cursor_wm_info = {
918 I965_CURSOR_FIFO,
919 VALLEYVIEW_CURSOR_MAX_WM,
920 I965_CURSOR_DFT_WM,
921 2,
922 G4X_FIFO_LINE_SIZE,
923};
924static const struct intel_watermark_params i965_cursor_wm_info = {
925 I965_CURSOR_FIFO,
926 I965_CURSOR_MAX_WM,
927 I965_CURSOR_DFT_WM,
928 2,
929 I915_FIFO_LINE_SIZE,
930};
931static const struct intel_watermark_params i945_wm_info = {
932 I945_FIFO_SIZE,
933 I915_MAX_WM,
934 1,
935 2,
936 I915_FIFO_LINE_SIZE
937};
938static const struct intel_watermark_params i915_wm_info = {
939 I915_FIFO_SIZE,
940 I915_MAX_WM,
941 1,
942 2,
943 I915_FIFO_LINE_SIZE
944};
945static const struct intel_watermark_params i855_wm_info = {
946 I855GM_FIFO_SIZE,
947 I915_MAX_WM,
948 1,
949 2,
950 I830_FIFO_LINE_SIZE
951};
952static const struct intel_watermark_params i830_wm_info = {
953 I830_FIFO_SIZE,
954 I915_MAX_WM,
955 1,
956 2,
957 I830_FIFO_LINE_SIZE
958};
959
960static const struct intel_watermark_params ironlake_display_wm_info = {
961 ILK_DISPLAY_FIFO,
962 ILK_DISPLAY_MAXWM,
963 ILK_DISPLAY_DFTWM,
964 2,
965 ILK_FIFO_LINE_SIZE
966};
967static const struct intel_watermark_params ironlake_cursor_wm_info = {
968 ILK_CURSOR_FIFO,
969 ILK_CURSOR_MAXWM,
970 ILK_CURSOR_DFTWM,
971 2,
972 ILK_FIFO_LINE_SIZE
973};
974static const struct intel_watermark_params ironlake_display_srwm_info = {
975 ILK_DISPLAY_SR_FIFO,
976 ILK_DISPLAY_MAX_SRWM,
977 ILK_DISPLAY_DFT_SRWM,
978 2,
979 ILK_FIFO_LINE_SIZE
980};
981static const struct intel_watermark_params ironlake_cursor_srwm_info = {
982 ILK_CURSOR_SR_FIFO,
983 ILK_CURSOR_MAX_SRWM,
984 ILK_CURSOR_DFT_SRWM,
985 2,
986 ILK_FIFO_LINE_SIZE
987};
988
989static const struct intel_watermark_params sandybridge_display_wm_info = {
990 SNB_DISPLAY_FIFO,
991 SNB_DISPLAY_MAXWM,
992 SNB_DISPLAY_DFTWM,
993 2,
994 SNB_FIFO_LINE_SIZE
995};
996static const struct intel_watermark_params sandybridge_cursor_wm_info = {
997 SNB_CURSOR_FIFO,
998 SNB_CURSOR_MAXWM,
999 SNB_CURSOR_DFTWM,
1000 2,
1001 SNB_FIFO_LINE_SIZE
1002};
1003static const struct intel_watermark_params sandybridge_display_srwm_info = {
1004 SNB_DISPLAY_SR_FIFO,
1005 SNB_DISPLAY_MAX_SRWM,
1006 SNB_DISPLAY_DFT_SRWM,
1007 2,
1008 SNB_FIFO_LINE_SIZE
1009};
1010static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
1011 SNB_CURSOR_SR_FIFO,
1012 SNB_CURSOR_MAX_SRWM,
1013 SNB_CURSOR_DFT_SRWM,
1014 2,
1015 SNB_FIFO_LINE_SIZE
1016};
1017
1018
1019/**
1020 * intel_calculate_wm - calculate watermark level
1021 * @clock_in_khz: pixel clock
1022 * @wm: chip FIFO params
1023 * @pixel_size: display pixel size
1024 * @latency_ns: memory latency for the platform
1025 *
1026 * Calculate the watermark level (the level at which the display plane will
1027 * start fetching from memory again). Each chip has a different display
1028 * FIFO size and allocation, so the caller needs to figure that out and pass
1029 * in the correct intel_watermark_params structure.
1030 *
1031 * As the pixel clock runs, the FIFO will be drained at a rate that depends
1032 * on the pixel size. When it reaches the watermark level, it'll start
 1033 * fetching FIFO-line-sized chunks from memory until the FIFO fills
1034 * past the watermark point. If the FIFO drains completely, a FIFO underrun
1035 * will occur, and a display engine hang could result.
1036 */
1037static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1038 const struct intel_watermark_params *wm,
1039 int fifo_size,
1040 int pixel_size,
1041 unsigned long latency_ns)
1042{
1043 long entries_required, wm_size;
1044
1045 /*
1046 * Note: we need to make sure we don't overflow for various clock &
1047 * latency values.
1048 * clocks go from a few thousand to several hundred thousand.
1049 * latency is usually a few thousand
1050 */
1051 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1052 1000;
1053 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
1054
1055 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
1056
1057 wm_size = fifo_size - (entries_required + wm->guard_size);
1058
1059 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
1060
1061 /* Don't promote wm_size to unsigned... */
1062 if (wm_size > (long)wm->max_wm)
1063 wm_size = wm->max_wm;
1064 if (wm_size <= 0)
1065 wm_size = wm->default_wm;
1066 return wm_size;
1067}
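/* Worked example with illustrative numbers: a 148,500 kHz pixel clock at
 * 4 bytes per pixel and 5,000 ns of latency drains roughly
 * 148 * 4 * 5000 / 1000 = 2960 bytes while a fetch is outstanding; with a
 * 64-byte FIFO line that is DIV_ROUND_UP(2960, 64) = 47 entries, so the
 * level would be programmed at fifo_size - (47 + guard_size), clamped as
 * above.
 */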
1068
1069static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1070{
1071 struct drm_crtc *crtc, *enabled = NULL;
1072
1073 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 1074 if (intel_crtc_active(crtc)) {
1075 if (enabled)
1076 return NULL;
1077 enabled = crtc;
1078 }
1079 }
1080
1081 return enabled;
1082}
1083
 1084static void pineview_update_wm(struct drm_crtc *unused_crtc)
 1085{
 1086 struct drm_device *dev = unused_crtc->dev;
1087 struct drm_i915_private *dev_priv = dev->dev_private;
1088 struct drm_crtc *crtc;
1089 const struct cxsr_latency *latency;
1090 u32 reg;
1091 unsigned long wm;
1092
1093 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1094 dev_priv->fsb_freq, dev_priv->mem_freq);
1095 if (!latency) {
1096 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1097 pineview_disable_cxsr(dev);
1098 return;
1099 }
1100
1101 crtc = single_enabled_crtc(dev);
1102 if (crtc) {
 1103 const struct drm_display_mode *adjusted_mode;
 1104 int pixel_size = crtc->fb->bits_per_pixel / 8;
1105 int clock;
1106
1107 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1108 clock = adjusted_mode->crtc_clock;
1109
1110 /* Display SR */
1111 wm = intel_calculate_wm(clock, &pineview_display_wm,
1112 pineview_display_wm.fifo_size,
1113 pixel_size, latency->display_sr);
1114 reg = I915_READ(DSPFW1);
1115 reg &= ~DSPFW_SR_MASK;
1116 reg |= wm << DSPFW_SR_SHIFT;
1117 I915_WRITE(DSPFW1, reg);
1118 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1119
1120 /* cursor SR */
1121 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1122 pineview_display_wm.fifo_size,
1123 pixel_size, latency->cursor_sr);
1124 reg = I915_READ(DSPFW3);
1125 reg &= ~DSPFW_CURSOR_SR_MASK;
1126 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1127 I915_WRITE(DSPFW3, reg);
1128
1129 /* Display HPLL off SR */
1130 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1131 pineview_display_hplloff_wm.fifo_size,
1132 pixel_size, latency->display_hpll_disable);
1133 reg = I915_READ(DSPFW3);
1134 reg &= ~DSPFW_HPLL_SR_MASK;
1135 reg |= wm & DSPFW_HPLL_SR_MASK;
1136 I915_WRITE(DSPFW3, reg);
1137
1138 /* cursor HPLL off SR */
1139 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1140 pineview_display_hplloff_wm.fifo_size,
1141 pixel_size, latency->cursor_hpll_disable);
1142 reg = I915_READ(DSPFW3);
1143 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1144 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1145 I915_WRITE(DSPFW3, reg);
1146 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1147
1148 /* activate cxsr */
1149 I915_WRITE(DSPFW3,
1150 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1151 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1152 } else {
1153 pineview_disable_cxsr(dev);
1154 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1155 }
1156}
1157
1158static bool g4x_compute_wm0(struct drm_device *dev,
1159 int plane,
1160 const struct intel_watermark_params *display,
1161 int display_latency_ns,
1162 const struct intel_watermark_params *cursor,
1163 int cursor_latency_ns,
1164 int *plane_wm,
1165 int *cursor_wm)
1166{
1167 struct drm_crtc *crtc;
 1168 const struct drm_display_mode *adjusted_mode;
1169 int htotal, hdisplay, clock, pixel_size;
1170 int line_time_us, line_count;
1171 int entries, tlb_miss;
1172
1173 crtc = intel_get_crtc_for_plane(dev, plane);
 1174 if (!intel_crtc_active(crtc)) {
1175 *cursor_wm = cursor->guard_size;
1176 *plane_wm = display->guard_size;
1177 return false;
1178 }
1179
 1180 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 1181 clock = adjusted_mode->crtc_clock;
 1182 htotal = adjusted_mode->htotal;
 1183 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1184 pixel_size = crtc->fb->bits_per_pixel / 8;
1185
1186 /* Use the small buffer method to calculate plane watermark */
1187 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1188 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1189 if (tlb_miss > 0)
1190 entries += tlb_miss;
1191 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1192 *plane_wm = entries + display->guard_size;
1193 if (*plane_wm > (int)display->max_wm)
1194 *plane_wm = display->max_wm;
1195
1196 /* Use the large buffer method to calculate cursor watermark */
1197 line_time_us = ((htotal * 1000) / clock);
1198 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1199 entries = line_count * 64 * pixel_size;
1200 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1201 if (tlb_miss > 0)
1202 entries += tlb_miss;
1203 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1204 *cursor_wm = entries + cursor->guard_size;
1205 if (*cursor_wm > (int)cursor->max_wm)
1206 *cursor_wm = (int)cursor->max_wm;
1207
1208 return true;
1209}
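/* The "small buffer" method above sizes the plane watermark from the
 * bytes that drain during the latency window, while the "large buffer"
 * method counts whole lines fetched in that window; the cursor always
 * uses the line-based estimate with its fixed 64-pixel width.
 */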
1210
1211/*
1212 * Check the wm result.
1213 *
1214 * If any calculated watermark values is larger than the maximum value that
1215 * can be programmed into the associated watermark register, that watermark
1216 * must be disabled.
1217 */
1218static bool g4x_check_srwm(struct drm_device *dev,
1219 int display_wm, int cursor_wm,
1220 const struct intel_watermark_params *display,
1221 const struct intel_watermark_params *cursor)
1222{
1223 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1224 display_wm, cursor_wm);
1225
1226 if (display_wm > display->max_wm) {
1227 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1228 display_wm, display->max_wm);
1229 return false;
1230 }
1231
1232 if (cursor_wm > cursor->max_wm) {
1233 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1234 cursor_wm, cursor->max_wm);
1235 return false;
1236 }
1237
1238 if (!(display_wm || cursor_wm)) {
1239 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1240 return false;
1241 }
1242
1243 return true;
1244}
1245
1246static bool g4x_compute_srwm(struct drm_device *dev,
1247 int plane,
1248 int latency_ns,
1249 const struct intel_watermark_params *display,
1250 const struct intel_watermark_params *cursor,
1251 int *display_wm, int *cursor_wm)
1252{
1253 struct drm_crtc *crtc;
 1254 const struct drm_display_mode *adjusted_mode;
1255 int hdisplay, htotal, pixel_size, clock;
1256 unsigned long line_time_us;
1257 int line_count, line_size;
1258 int small, large;
1259 int entries;
1260
1261 if (!latency_ns) {
1262 *display_wm = *cursor_wm = 0;
1263 return false;
1264 }
1265
1266 crtc = intel_get_crtc_for_plane(dev, plane);
 1267 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 1268 clock = adjusted_mode->crtc_clock;
 1269 htotal = adjusted_mode->htotal;
 1270 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1271 pixel_size = crtc->fb->bits_per_pixel / 8;
1272
1273 line_time_us = (htotal * 1000) / clock;
1274 line_count = (latency_ns / line_time_us + 1000) / 1000;
1275 line_size = hdisplay * pixel_size;
1276
1277 /* Use the minimum of the small and large buffer method for primary */
1278 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1279 large = line_count * line_size;
1280
1281 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1282 *display_wm = entries + display->guard_size;
1283
1284 /* calculate the self-refresh watermark for display cursor */
1285 entries = line_count * pixel_size * 64;
1286 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1287 *cursor_wm = entries + cursor->guard_size;
1288
1289 return g4x_check_srwm(dev,
1290 *display_wm, *cursor_wm,
1291 display, cursor);
1292}
1293
1294static bool vlv_compute_drain_latency(struct drm_device *dev,
1295 int plane,
1296 int *plane_prec_mult,
1297 int *plane_dl,
1298 int *cursor_prec_mult,
1299 int *cursor_dl)
1300{
1301 struct drm_crtc *crtc;
1302 int clock, pixel_size;
1303 int entries;
1304
1305 crtc = intel_get_crtc_for_plane(dev, plane);
 1306 if (!intel_crtc_active(crtc))
1307 return false;
1308
 1309 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1310 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1311
1312 entries = (clock / 1000) * pixel_size;
1313 *plane_prec_mult = (entries > 256) ?
1314 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1315 *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1316 pixel_size);
1317
1318 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
1319 *cursor_prec_mult = (entries > 256) ?
1320 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1321 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1322
1323 return true;
1324}
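/* Illustrative numbers: at a 200,000 kHz pixel clock with 4 bytes per
 * pixel, entries = 200 * 4 = 800 > 256, so the 32-unit precision is
 * selected and plane_dl = (64 * 32 * 4) / (200 * 4) = 10.
 */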
1325
1326/*
1327 * Update drain latency registers of memory arbiter
1328 *
1329 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1330 * to be programmed. Each plane has a drain latency multiplier and a drain
1331 * latency value.
1332 */
1333
1334static void vlv_update_drain_latency(struct drm_device *dev)
1335{
1336 struct drm_i915_private *dev_priv = dev->dev_private;
1337 int planea_prec, planea_dl, planeb_prec, planeb_dl;
1338 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1339 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1340 either 16 or 32 */
1341
1342 /* For plane A, Cursor A */
1343 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1344 &cursor_prec_mult, &cursora_dl)) {
1345 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1346 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1347 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1348 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1349
1350 I915_WRITE(VLV_DDL1, cursora_prec |
1351 (cursora_dl << DDL_CURSORA_SHIFT) |
1352 planea_prec | planea_dl);
1353 }
1354
1355 /* For plane B, Cursor B */
1356 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1357 &cursor_prec_mult, &cursorb_dl)) {
1358 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1359 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1360 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1361 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1362
1363 I915_WRITE(VLV_DDL2, cursorb_prec |
1364 (cursorb_dl << DDL_CURSORB_SHIFT) |
1365 planeb_prec | planeb_dl);
1366 }
1367}
1368
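/* A power-of-two mask has exactly one bit set, i.e. exactly one pipe is
 * active, which is the only case in which the self-refresh watermarks
 * below may be enabled.
 */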
1369#define single_plane_enabled(mask) is_power_of_2(mask)
1370
 1371static void valleyview_update_wm(struct drm_crtc *crtc)
 1372{
 1373 struct drm_device *dev = crtc->dev;
1374 static const int sr_latency_ns = 12000;
1375 struct drm_i915_private *dev_priv = dev->dev_private;
1376 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1377 int plane_sr, cursor_sr;
 1378 int ignore_plane_sr, ignore_cursor_sr;
1379 unsigned int enabled = 0;
1380
1381 vlv_update_drain_latency(dev);
1382
 1383 if (g4x_compute_wm0(dev, PIPE_A,
1384 &valleyview_wm_info, latency_ns,
1385 &valleyview_cursor_wm_info, latency_ns,
1386 &planea_wm, &cursora_wm))
 1387 enabled |= 1 << PIPE_A;
 1388
 1389 if (g4x_compute_wm0(dev, PIPE_B,
1390 &valleyview_wm_info, latency_ns,
1391 &valleyview_cursor_wm_info, latency_ns,
1392 &planeb_wm, &cursorb_wm))
 1393 enabled |= 1 << PIPE_B;
 1394
1395 if (single_plane_enabled(enabled) &&
1396 g4x_compute_srwm(dev, ffs(enabled) - 1,
1397 sr_latency_ns,
1398 &valleyview_wm_info,
1399 &valleyview_cursor_wm_info,
1400 &plane_sr, &ignore_cursor_sr) &&
1401 g4x_compute_srwm(dev, ffs(enabled) - 1,
1402 2*sr_latency_ns,
1403 &valleyview_wm_info,
1404 &valleyview_cursor_wm_info,
 1405 &ignore_plane_sr, &cursor_sr)) {
 1406 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
 1407 } else {
1408 I915_WRITE(FW_BLC_SELF_VLV,
1409 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1410 plane_sr = cursor_sr = 0;
1411 }
1412
1413 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1414 planea_wm, cursora_wm,
1415 planeb_wm, cursorb_wm,
1416 plane_sr, cursor_sr);
1417
1418 I915_WRITE(DSPFW1,
1419 (plane_sr << DSPFW_SR_SHIFT) |
1420 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1421 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1422 planea_wm);
1423 I915_WRITE(DSPFW2,
 1424 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1425 (cursora_wm << DSPFW_CURSORA_SHIFT));
1426 I915_WRITE(DSPFW3,
1427 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1428 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1429}
1430
 1431static void g4x_update_wm(struct drm_crtc *crtc)
 1432{
 1433 struct drm_device *dev = crtc->dev;
1434 static const int sr_latency_ns = 12000;
1435 struct drm_i915_private *dev_priv = dev->dev_private;
1436 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1437 int plane_sr, cursor_sr;
1438 unsigned int enabled = 0;
1439
 1440 if (g4x_compute_wm0(dev, PIPE_A,
1441 &g4x_wm_info, latency_ns,
1442 &g4x_cursor_wm_info, latency_ns,
1443 &planea_wm, &cursora_wm))
 1444 enabled |= 1 << PIPE_A;
 1445
 1446 if (g4x_compute_wm0(dev, PIPE_B,
1447 &g4x_wm_info, latency_ns,
1448 &g4x_cursor_wm_info, latency_ns,
1449 &planeb_wm, &cursorb_wm))
 1450 enabled |= 1 << PIPE_B;
 1451
1452 if (single_plane_enabled(enabled) &&
1453 g4x_compute_srwm(dev, ffs(enabled) - 1,
1454 sr_latency_ns,
1455 &g4x_wm_info,
1456 &g4x_cursor_wm_info,
 1457 &plane_sr, &cursor_sr)) {
 1458 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
 1459 } else {
1460 I915_WRITE(FW_BLC_SELF,
1461 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1462 plane_sr = cursor_sr = 0;
1463 }
1464
1465 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1466 planea_wm, cursora_wm,
1467 planeb_wm, cursorb_wm,
1468 plane_sr, cursor_sr);
1469
1470 I915_WRITE(DSPFW1,
1471 (plane_sr << DSPFW_SR_SHIFT) |
1472 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1473 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1474 planea_wm);
1475 I915_WRITE(DSPFW2,
 1476 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1477 (cursora_wm << DSPFW_CURSORA_SHIFT));
1478 /* HPLL off in SR has some issues on G4x... disable it */
1479 I915_WRITE(DSPFW3,
 1480 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1481 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1482}
1483
 1484static void i965_update_wm(struct drm_crtc *unused_crtc)
 1485{
 1486 struct drm_device *dev = unused_crtc->dev;
1487 struct drm_i915_private *dev_priv = dev->dev_private;
1488 struct drm_crtc *crtc;
1489 int srwm = 1;
1490 int cursor_sr = 16;
1491
1492 /* Calc sr entries for one plane configs */
1493 crtc = single_enabled_crtc(dev);
1494 if (crtc) {
1495 /* self-refresh has much higher latency */
1496 static const int sr_latency_ns = 12000;
 1497 const struct drm_display_mode *adjusted_mode =
 1498 &to_intel_crtc(crtc)->config.adjusted_mode;
 1499 int clock = adjusted_mode->crtc_clock;
 1500 int htotal = adjusted_mode->htotal;
 1501 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1502 int pixel_size = crtc->fb->bits_per_pixel / 8;
1503 unsigned long line_time_us;
1504 int entries;
1505
1506 line_time_us = ((htotal * 1000) / clock);
1507
1508 /* Use ns/us then divide to preserve precision */
1509 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1510 pixel_size * hdisplay;
1511 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1512 srwm = I965_FIFO_SIZE - entries;
1513 if (srwm < 0)
1514 srwm = 1;
1515 srwm &= 0x1ff;
1516 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1517 entries, srwm);
1518
1519 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1520 pixel_size * 64;
1521 entries = DIV_ROUND_UP(entries,
1522 i965_cursor_wm_info.cacheline_size);
1523 cursor_sr = i965_cursor_wm_info.fifo_size -
1524 (entries + i965_cursor_wm_info.guard_size);
1525
1526 if (cursor_sr > i965_cursor_wm_info.max_wm)
1527 cursor_sr = i965_cursor_wm_info.max_wm;
1528
1529 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1530 "cursor %d\n", srwm, cursor_sr);
1531
1532 if (IS_CRESTLINE(dev))
1533 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1534 } else {
1535 /* Turn off self refresh if both pipes are enabled */
1536 if (IS_CRESTLINE(dev))
1537 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1538 & ~FW_BLC_SELF_EN);
1539 }
1540
1541 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1542 srwm);
1543
1544 /* 965 has limitations... */
1545 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1546 (8 << 16) | (8 << 8) | (8 << 0));
1547 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1548 /* update cursor SR watermark */
1549 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1550}
1551
 1552static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 1553{
 1554 struct drm_device *dev = unused_crtc->dev;
1555 struct drm_i915_private *dev_priv = dev->dev_private;
1556 const struct intel_watermark_params *wm_info;
1557 uint32_t fwater_lo;
1558 uint32_t fwater_hi;
1559 int cwm, srwm = 1;
1560 int fifo_size;
1561 int planea_wm, planeb_wm;
1562 struct drm_crtc *crtc, *enabled = NULL;
1563
1564 if (IS_I945GM(dev))
1565 wm_info = &i945_wm_info;
1566 else if (!IS_GEN2(dev))
1567 wm_info = &i915_wm_info;
1568 else
1569 wm_info = &i855_wm_info;
1570
1571 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1572 crtc = intel_get_crtc_for_plane(dev, 0);
 1573 if (intel_crtc_active(crtc)) {
 1574 const struct drm_display_mode *adjusted_mode;
1575 int cpp = crtc->fb->bits_per_pixel / 8;
1576 if (IS_GEN2(dev))
1577 cpp = 4;
1578
 1579 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 1580 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 1581 wm_info, fifo_size, cpp,
1582 latency_ns);
1583 enabled = crtc;
1584 } else
1585 planea_wm = fifo_size - wm_info->guard_size;
1586
1587 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1588 crtc = intel_get_crtc_for_plane(dev, 1);
 1589 if (intel_crtc_active(crtc)) {
 1590 const struct drm_display_mode *adjusted_mode;
1591 int cpp = crtc->fb->bits_per_pixel / 8;
1592 if (IS_GEN2(dev))
1593 cpp = 4;
1594
 1595 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 1596 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 1597 wm_info, fifo_size, cpp,
1598 latency_ns);
1599 if (enabled == NULL)
1600 enabled = crtc;
1601 else
1602 enabled = NULL;
1603 } else
1604 planeb_wm = fifo_size - wm_info->guard_size;
1605
1606 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1607
1608 /*
1609 * Overlay gets an aggressive default since video jitter is bad.
1610 */
1611 cwm = 2;
1612
1613 /* Play safe and disable self-refresh before adjusting watermarks. */
1614 if (IS_I945G(dev) || IS_I945GM(dev))
1615 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1616 else if (IS_I915GM(dev))
1617 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1618
1619 /* Calc sr entries for one plane configs */
1620 if (HAS_FW_BLC(dev) && enabled) {
1621 /* self-refresh has much higher latency */
1622 static const int sr_latency_ns = 6000;
 1623 const struct drm_display_mode *adjusted_mode =
 1624 &to_intel_crtc(enabled)->config.adjusted_mode;
 1625 int clock = adjusted_mode->crtc_clock;
 1626 int htotal = adjusted_mode->htotal;
 1627 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1628 int pixel_size = enabled->fb->bits_per_pixel / 8;
1629 unsigned long line_time_us;
1630 int entries;
1631
1632 line_time_us = (htotal * 1000) / clock;
1633
1634 /* Use ns/us then divide to preserve precision */
1635 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1636 pixel_size * hdisplay;
1637 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1638 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1639 srwm = wm_info->fifo_size - entries;
1640 if (srwm < 0)
1641 srwm = 1;
1642
1643 if (IS_I945G(dev) || IS_I945GM(dev))
1644 I915_WRITE(FW_BLC_SELF,
1645 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1646 else if (IS_I915GM(dev))
1647 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1648 }
1649
1650 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1651 planea_wm, planeb_wm, cwm, srwm);
1652
1653 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1654 fwater_hi = (cwm & 0x1f);
1655
1656 /* Set request length to 8 cachelines per fetch */
1657 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1658 fwater_hi = fwater_hi | (1 << 8);
1659
1660 I915_WRITE(FW_BLC, fwater_lo);
1661 I915_WRITE(FW_BLC2, fwater_hi);
1662
1663 if (HAS_FW_BLC(dev)) {
1664 if (enabled) {
1665 if (IS_I945G(dev) || IS_I945GM(dev))
1666 I915_WRITE(FW_BLC_SELF,
1667 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1668 else if (IS_I915GM(dev))
1669 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1670 DRM_DEBUG_KMS("memory self refresh enabled\n");
1671 } else
1672 DRM_DEBUG_KMS("memory self refresh disabled\n");
1673 }
1674}
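/*
 * Illustrative sketch, not part of the driver: the self-refresh entry
 * calculation above with hypothetical numbers (htotal 2200, 148500 kHz
 * pixel clock, 1920 x 4 bytes per line, and the 6000 ns latency used
 * above). The 14 us line time covers the latency window, so one extra
 * line of pixels is buffered, before cacheline rounding.
 */
static int example_i9xx_srwm_entries(void)
{
	int htotal = 2200, clock = 148500;		/* kHz */
	int hdisplay = 1920, pixel_size = 4;
	static const int sr_latency_ns = 6000;
	unsigned long line_time_us = (htotal * 1000) / clock;	/* 14 us */

	/* ns / us, rounded up to whole lines, then scaled to bytes */
	return (((sr_latency_ns / line_time_us) + 1000) / 1000) *
		pixel_size * hdisplay;			/* 7680 bytes */
}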
1675
46ba614c 1676static void i830_update_wm(struct drm_crtc *unused_crtc)
b445e3b0 1677{
46ba614c 1678 struct drm_device *dev = unused_crtc->dev;
b445e3b0
ED
1679 struct drm_i915_private *dev_priv = dev->dev_private;
1680 struct drm_crtc *crtc;
241bfc38 1681 const struct drm_display_mode *adjusted_mode;
b445e3b0
ED
1682 uint32_t fwater_lo;
1683 int planea_wm;
1684
1685 crtc = single_enabled_crtc(dev);
1686 if (crtc == NULL)
1687 return;
1688
241bfc38
DL
1689 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1690 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
4fe8590a 1691 &i830_wm_info,
b445e3b0 1692 dev_priv->display.get_fifo_size(dev, 0),
b9e0bda3 1693 4, latency_ns);
b445e3b0
ED
1694 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1695 fwater_lo |= (3<<8) | planea_wm;
1696
1697 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1698
1699 I915_WRITE(FW_BLC, fwater_lo);
1700}
1701
b445e3b0
ED
1702/*
1703 * Check the wm result.
1704 *
1705 * If any calculated watermark value is larger than the maximum value that
1706 * can be programmed into the associated watermark register, that watermark
1707 * must be disabled.
1708 */
1709static bool ironlake_check_srwm(struct drm_device *dev, int level,
1710 int fbc_wm, int display_wm, int cursor_wm,
1711 const struct intel_watermark_params *display,
1712 const struct intel_watermark_params *cursor)
1713{
1714 struct drm_i915_private *dev_priv = dev->dev_private;
1715
1716 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1717 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1718
1719 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1720 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1721 fbc_wm, SNB_FBC_MAX_SRWM, level);
1722
1723	/* fbc has its own way to disable FBC WM */
1724 I915_WRITE(DISP_ARB_CTL,
1725 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1726 return false;
615aaa5f
VS
1727 } else if (INTEL_INFO(dev)->gen >= 6) {
1728 /* enable FBC WM (except on ILK, where it must remain off) */
1729 I915_WRITE(DISP_ARB_CTL,
1730 I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
b445e3b0
ED
1731 }
1732
1733 if (display_wm > display->max_wm) {
1734 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1735 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1736 return false;
1737 }
1738
1739 if (cursor_wm > cursor->max_wm) {
1740 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1741 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1742 return false;
1743 }
1744
1745 if (!(fbc_wm || display_wm || cursor_wm)) {
1746 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1747 return false;
1748 }
1749
1750 return true;
1751}
1752
1753/*
1754 * Compute the watermark values for WM[1-3].
1755 */
1756static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1757 int latency_ns,
1758 const struct intel_watermark_params *display,
1759 const struct intel_watermark_params *cursor,
1760 int *fbc_wm, int *display_wm, int *cursor_wm)
1761{
1762 struct drm_crtc *crtc;
4fe8590a 1763 const struct drm_display_mode *adjusted_mode;
b445e3b0
ED
1764 unsigned long line_time_us;
1765 int hdisplay, htotal, pixel_size, clock;
1766 int line_count, line_size;
1767 int small, large;
1768 int entries;
1769
1770 if (!latency_ns) {
1771 *fbc_wm = *display_wm = *cursor_wm = 0;
1772 return false;
1773 }
1774
1775 crtc = intel_get_crtc_for_plane(dev, plane);
4fe8590a 1776 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
241bfc38 1777 clock = adjusted_mode->crtc_clock;
4fe8590a 1778 htotal = adjusted_mode->htotal;
37327abd 1779 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
b445e3b0
ED
1780 pixel_size = crtc->fb->bits_per_pixel / 8;
1781
1782 line_time_us = (htotal * 1000) / clock;
1783 line_count = (latency_ns / line_time_us + 1000) / 1000;
1784 line_size = hdisplay * pixel_size;
1785
1786 /* Use the minimum of the small and large buffer method for primary */
1787 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1788 large = line_count * line_size;
1789
1790 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1791 *display_wm = entries + display->guard_size;
1792
1793 /*
1794 * Spec says:
1795 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1796 */
1797 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1798
1799 /* calculate the self-refresh watermark for display cursor */
1800 entries = line_count * pixel_size * 64;
1801 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1802 *cursor_wm = entries + cursor->guard_size;
1803
1804 return ironlake_check_srwm(dev, level,
1805 *fbc_wm, *display_wm, *cursor_wm,
1806 display, cursor);
1807}
1808
46ba614c 1809static void ironlake_update_wm(struct drm_crtc *crtc)
b445e3b0 1810{
46ba614c 1811 struct drm_device *dev = crtc->dev;
b445e3b0
ED
1812 struct drm_i915_private *dev_priv = dev->dev_private;
1813 int fbc_wm, plane_wm, cursor_wm;
1814 unsigned int enabled;
1815
1816 enabled = 0;
51cea1f4 1817 if (g4x_compute_wm0(dev, PIPE_A,
b445e3b0 1818 &ironlake_display_wm_info,
b0aea5dc 1819 dev_priv->wm.pri_latency[0] * 100,
b445e3b0 1820 &ironlake_cursor_wm_info,
b0aea5dc 1821 dev_priv->wm.cur_latency[0] * 100,
b445e3b0
ED
1822 &plane_wm, &cursor_wm)) {
1823 I915_WRITE(WM0_PIPEA_ILK,
1824 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1825 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1826 " plane %d, " "cursor: %d\n",
1827 plane_wm, cursor_wm);
51cea1f4 1828 enabled |= 1 << PIPE_A;
b445e3b0
ED
1829 }
1830
51cea1f4 1831 if (g4x_compute_wm0(dev, PIPE_B,
b445e3b0 1832 &ironlake_display_wm_info,
b0aea5dc 1833 dev_priv->wm.pri_latency[0] * 100,
b445e3b0 1834 &ironlake_cursor_wm_info,
b0aea5dc 1835 dev_priv->wm.cur_latency[0] * 100,
b445e3b0
ED
1836 &plane_wm, &cursor_wm)) {
1837 I915_WRITE(WM0_PIPEB_ILK,
1838 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1839 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1840 " plane %d, cursor: %d\n",
1841 plane_wm, cursor_wm);
51cea1f4 1842 enabled |= 1 << PIPE_B;
b445e3b0
ED
1843 }
1844
1845 /*
1846 * Calculate and update the self-refresh watermark only when one
1847 * display plane is used.
1848 */
1849 I915_WRITE(WM3_LP_ILK, 0);
1850 I915_WRITE(WM2_LP_ILK, 0);
1851 I915_WRITE(WM1_LP_ILK, 0);
1852
1853 if (!single_plane_enabled(enabled))
1854 return;
1855 enabled = ffs(enabled) - 1;
1856
1857 /* WM1 */
1858 if (!ironlake_compute_srwm(dev, 1, enabled,
b0aea5dc 1859 dev_priv->wm.pri_latency[1] * 500,
b445e3b0
ED
1860 &ironlake_display_srwm_info,
1861 &ironlake_cursor_srwm_info,
1862 &fbc_wm, &plane_wm, &cursor_wm))
1863 return;
1864
1865 I915_WRITE(WM1_LP_ILK,
1866 WM1_LP_SR_EN |
b0aea5dc 1867 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
b445e3b0
ED
1868 (fbc_wm << WM1_LP_FBC_SHIFT) |
1869 (plane_wm << WM1_LP_SR_SHIFT) |
1870 cursor_wm);
1871
1872 /* WM2 */
1873 if (!ironlake_compute_srwm(dev, 2, enabled,
b0aea5dc 1874 dev_priv->wm.pri_latency[2] * 500,
b445e3b0
ED
1875 &ironlake_display_srwm_info,
1876 &ironlake_cursor_srwm_info,
1877 &fbc_wm, &plane_wm, &cursor_wm))
1878 return;
1879
1880 I915_WRITE(WM2_LP_ILK,
1881 WM2_LP_EN |
b0aea5dc 1882 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
b445e3b0
ED
1883 (fbc_wm << WM1_LP_FBC_SHIFT) |
1884 (plane_wm << WM1_LP_SR_SHIFT) |
1885 cursor_wm);
1886
1887 /*
1888 * WM3 is unsupported on ILK, probably because we don't have latency
1889 * data for that power state
1890 */
1891}
1892
46ba614c 1893static void sandybridge_update_wm(struct drm_crtc *crtc)
b445e3b0 1894{
46ba614c 1895 struct drm_device *dev = crtc->dev;
b445e3b0 1896 struct drm_i915_private *dev_priv = dev->dev_private;
b0aea5dc 1897 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
b445e3b0
ED
1898 u32 val;
1899 int fbc_wm, plane_wm, cursor_wm;
1900 unsigned int enabled;
1901
1902 enabled = 0;
51cea1f4 1903 if (g4x_compute_wm0(dev, PIPE_A,
b445e3b0
ED
1904 &sandybridge_display_wm_info, latency,
1905 &sandybridge_cursor_wm_info, latency,
1906 &plane_wm, &cursor_wm)) {
1907 val = I915_READ(WM0_PIPEA_ILK);
1908 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1909 I915_WRITE(WM0_PIPEA_ILK, val |
1910 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1911 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1912 " plane %d, " "cursor: %d\n",
1913 plane_wm, cursor_wm);
51cea1f4 1914 enabled |= 1 << PIPE_A;
b445e3b0
ED
1915 }
1916
51cea1f4 1917 if (g4x_compute_wm0(dev, PIPE_B,
b445e3b0
ED
1918 &sandybridge_display_wm_info, latency,
1919 &sandybridge_cursor_wm_info, latency,
1920 &plane_wm, &cursor_wm)) {
1921 val = I915_READ(WM0_PIPEB_ILK);
1922 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1923 I915_WRITE(WM0_PIPEB_ILK, val |
1924 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1925 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1926 " plane %d, cursor: %d\n",
1927 plane_wm, cursor_wm);
51cea1f4 1928 enabled |= 1 << PIPE_B;
b445e3b0
ED
1929 }
1930
c43d0188
CW
1931 /*
1932 * Calculate and update the self-refresh watermark only when one
1933 * display plane is used.
1934 *
1935	 * SNB supports 3 levels of watermarks.
1936 *
1937	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
1938	 * and disabled in descending order.
1939 *
1940 */
1941 I915_WRITE(WM3_LP_ILK, 0);
1942 I915_WRITE(WM2_LP_ILK, 0);
1943 I915_WRITE(WM1_LP_ILK, 0);
1944
1945 if (!single_plane_enabled(enabled) ||
1946 dev_priv->sprite_scaling_enabled)
1947 return;
1948 enabled = ffs(enabled) - 1;
1949
1950 /* WM1 */
1951 if (!ironlake_compute_srwm(dev, 1, enabled,
b0aea5dc 1952 dev_priv->wm.pri_latency[1] * 500,
c43d0188
CW
1953 &sandybridge_display_srwm_info,
1954 &sandybridge_cursor_srwm_info,
1955 &fbc_wm, &plane_wm, &cursor_wm))
1956 return;
1957
1958 I915_WRITE(WM1_LP_ILK,
1959 WM1_LP_SR_EN |
b0aea5dc 1960 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
c43d0188
CW
1961 (fbc_wm << WM1_LP_FBC_SHIFT) |
1962 (plane_wm << WM1_LP_SR_SHIFT) |
1963 cursor_wm);
1964
1965 /* WM2 */
1966 if (!ironlake_compute_srwm(dev, 2, enabled,
b0aea5dc 1967 dev_priv->wm.pri_latency[2] * 500,
c43d0188
CW
1968 &sandybridge_display_srwm_info,
1969 &sandybridge_cursor_srwm_info,
1970 &fbc_wm, &plane_wm, &cursor_wm))
1971 return;
1972
1973 I915_WRITE(WM2_LP_ILK,
1974 WM2_LP_EN |
b0aea5dc 1975 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
c43d0188
CW
1976 (fbc_wm << WM1_LP_FBC_SHIFT) |
1977 (plane_wm << WM1_LP_SR_SHIFT) |
1978 cursor_wm);
1979
1980 /* WM3 */
1981 if (!ironlake_compute_srwm(dev, 3, enabled,
b0aea5dc 1982 dev_priv->wm.pri_latency[3] * 500,
c43d0188
CW
1983 &sandybridge_display_srwm_info,
1984 &sandybridge_cursor_srwm_info,
1985 &fbc_wm, &plane_wm, &cursor_wm))
1986 return;
1987
1988 I915_WRITE(WM3_LP_ILK,
1989 WM3_LP_EN |
b0aea5dc 1990 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
c43d0188
CW
1991 (fbc_wm << WM1_LP_FBC_SHIFT) |
1992 (plane_wm << WM1_LP_SR_SHIFT) |
1993 cursor_wm);
1994}
1995
46ba614c 1996static void ivybridge_update_wm(struct drm_crtc *crtc)
c43d0188 1997{
46ba614c 1998 struct drm_device *dev = crtc->dev;
c43d0188 1999 struct drm_i915_private *dev_priv = dev->dev_private;
b0aea5dc 2000 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
c43d0188
CW
2001 u32 val;
2002 int fbc_wm, plane_wm, cursor_wm;
2003 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
2004 unsigned int enabled;
2005
2006 enabled = 0;
51cea1f4 2007 if (g4x_compute_wm0(dev, PIPE_A,
c43d0188
CW
2008 &sandybridge_display_wm_info, latency,
2009 &sandybridge_cursor_wm_info, latency,
2010 &plane_wm, &cursor_wm)) {
2011 val = I915_READ(WM0_PIPEA_ILK);
2012 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2013 I915_WRITE(WM0_PIPEA_ILK, val |
2014 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2015 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
2016 " plane %d, " "cursor: %d\n",
2017 plane_wm, cursor_wm);
51cea1f4 2018 enabled |= 1 << PIPE_A;
c43d0188
CW
2019 }
2020
51cea1f4 2021 if (g4x_compute_wm0(dev, PIPE_B,
c43d0188
CW
2022 &sandybridge_display_wm_info, latency,
2023 &sandybridge_cursor_wm_info, latency,
2024 &plane_wm, &cursor_wm)) {
2025 val = I915_READ(WM0_PIPEB_ILK);
2026 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2027 I915_WRITE(WM0_PIPEB_ILK, val |
2028 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2029 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
2030 " plane %d, cursor: %d\n",
2031 plane_wm, cursor_wm);
51cea1f4 2032 enabled |= 1 << PIPE_B;
c43d0188
CW
2033 }
2034
51cea1f4 2035 if (g4x_compute_wm0(dev, PIPE_C,
b445e3b0
ED
2036 &sandybridge_display_wm_info, latency,
2037 &sandybridge_cursor_wm_info, latency,
2038 &plane_wm, &cursor_wm)) {
2039 val = I915_READ(WM0_PIPEC_IVB);
2040 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2041 I915_WRITE(WM0_PIPEC_IVB, val |
2042 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2043 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
2044 " plane %d, cursor: %d\n",
2045 plane_wm, cursor_wm);
51cea1f4 2046 enabled |= 1 << PIPE_C;
b445e3b0
ED
2047 }
2048
2049 /*
2050 * Calculate and update the self-refresh watermark only when one
2051 * display plane is used.
2052 *
2053	 * SNB supports 3 levels of watermarks.
2054 *
2055	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
2056	 * and disabled in descending order.
2057 *
2058 */
2059 I915_WRITE(WM3_LP_ILK, 0);
2060 I915_WRITE(WM2_LP_ILK, 0);
2061 I915_WRITE(WM1_LP_ILK, 0);
2062
2063 if (!single_plane_enabled(enabled) ||
2064 dev_priv->sprite_scaling_enabled)
2065 return;
2066 enabled = ffs(enabled) - 1;
2067
2068 /* WM1 */
2069 if (!ironlake_compute_srwm(dev, 1, enabled,
b0aea5dc 2070 dev_priv->wm.pri_latency[1] * 500,
b445e3b0
ED
2071 &sandybridge_display_srwm_info,
2072 &sandybridge_cursor_srwm_info,
2073 &fbc_wm, &plane_wm, &cursor_wm))
2074 return;
2075
2076 I915_WRITE(WM1_LP_ILK,
2077 WM1_LP_SR_EN |
b0aea5dc 2078 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
b445e3b0
ED
2079 (fbc_wm << WM1_LP_FBC_SHIFT) |
2080 (plane_wm << WM1_LP_SR_SHIFT) |
2081 cursor_wm);
2082
2083 /* WM2 */
2084 if (!ironlake_compute_srwm(dev, 2, enabled,
b0aea5dc 2085 dev_priv->wm.pri_latency[2] * 500,
b445e3b0
ED
2086 &sandybridge_display_srwm_info,
2087 &sandybridge_cursor_srwm_info,
2088 &fbc_wm, &plane_wm, &cursor_wm))
2089 return;
2090
2091 I915_WRITE(WM2_LP_ILK,
2092 WM2_LP_EN |
b0aea5dc 2093 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
b445e3b0
ED
2094 (fbc_wm << WM1_LP_FBC_SHIFT) |
2095 (plane_wm << WM1_LP_SR_SHIFT) |
2096 cursor_wm);
2097
c43d0188 2098 /* WM3, note we have to correct the cursor latency */
b445e3b0 2099 if (!ironlake_compute_srwm(dev, 3, enabled,
b0aea5dc 2100 dev_priv->wm.pri_latency[3] * 500,
b445e3b0
ED
2101 &sandybridge_display_srwm_info,
2102 &sandybridge_cursor_srwm_info,
c43d0188
CW
2103 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2104 !ironlake_compute_srwm(dev, 3, enabled,
b0aea5dc 2105 dev_priv->wm.cur_latency[3] * 500,
c43d0188
CW
2106 &sandybridge_display_srwm_info,
2107 &sandybridge_cursor_srwm_info,
2108 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
b445e3b0
ED
2109 return;
2110
2111 I915_WRITE(WM3_LP_ILK,
2112 WM3_LP_EN |
b0aea5dc 2113 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
b445e3b0
ED
2114 (fbc_wm << WM1_LP_FBC_SHIFT) |
2115 (plane_wm << WM1_LP_SR_SHIFT) |
2116 cursor_wm);
2117}
2118
3658729a
VS
2119static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2120 struct drm_crtc *crtc)
801bcfff
PZ
2121{
2122 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
fd4daa9c 2123 uint32_t pixel_rate;
801bcfff 2124
241bfc38 2125 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
801bcfff
PZ
2126
2127 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2128 * adjust the pixel_rate here. */
2129
fd4daa9c 2130 if (intel_crtc->config.pch_pfit.enabled) {
801bcfff 2131 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
fd4daa9c 2132 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
801bcfff 2133
37327abd
VS
2134 pipe_w = intel_crtc->config.pipe_src_w;
2135 pipe_h = intel_crtc->config.pipe_src_h;
801bcfff
PZ
2136 pfit_w = (pfit_size >> 16) & 0xFFFF;
2137 pfit_h = pfit_size & 0xFFFF;
2138 if (pipe_w < pfit_w)
2139 pipe_w = pfit_w;
2140 if (pipe_h < pfit_h)
2141 pipe_h = pfit_h;
2142
2143 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2144 pfit_w * pfit_h);
2145 }
2146
2147 return pixel_rate;
2148}
2149
37126462 2150/* latency must be in 0.1us units. */
23297044 2151static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
801bcfff
PZ
2152 uint32_t latency)
2153{
2154 uint64_t ret;
2155
3312ba65
VS
2156 if (WARN(latency == 0, "Latency value missing\n"))
2157 return UINT_MAX;
2158
801bcfff
PZ
2159 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2160 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2161
2162 return ret;
2163}
2164
37126462 2165/* latency must be in 0.1us units. */
23297044 2166static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
801bcfff
PZ
2167 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2168 uint32_t latency)
2169{
2170 uint32_t ret;
2171
3312ba65
VS
2172 if (WARN(latency == 0, "Latency value missing\n"))
2173 return UINT_MAX;
2174
801bcfff
PZ
2175 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2176 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2177 ret = DIV_ROUND_UP(ret, 64) + 2;
2178 return ret;
2179}
2180
23297044 2181static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
cca32e9a
PZ
2182 uint8_t bytes_per_pixel)
2183{
2184 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2185}
2186
801bcfff
PZ
2187struct hsw_pipe_wm_parameters {
2188 bool active;
801bcfff
PZ
2189 uint32_t pipe_htotal;
2190 uint32_t pixel_rate;
c35426d2
VS
2191 struct intel_plane_wm_parameters pri;
2192 struct intel_plane_wm_parameters spr;
2193 struct intel_plane_wm_parameters cur;
801bcfff
PZ
2194};
2195
cca32e9a
PZ
2196struct hsw_wm_maximums {
2197 uint16_t pri;
2198 uint16_t spr;
2199 uint16_t cur;
2200 uint16_t fbc;
2201};
2202
801bcfff
PZ
2203struct hsw_wm_values {
2204 uint32_t wm_pipe[3];
2205 uint32_t wm_lp[3];
2206 uint32_t wm_lp_spr[3];
2207 uint32_t wm_linetime[3];
cca32e9a 2208 bool enable_fbc_wm;
801bcfff
PZ
2209};
2210
240264f4
VS
2211/* used in computing the new watermarks state */
2212struct intel_wm_config {
2213 unsigned int num_pipes_active;
2214 bool sprites_enabled;
2215 bool sprites_scaled;
2216 bool fbc_wm_enabled;
2217};
2218
37126462
VS
2219/*
2220 * For both WM_PIPE and WM_LP.
2221 * mem_value must be in 0.1us units.
2222 */
ac830fe1 2223static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
cca32e9a
PZ
2224 uint32_t mem_value,
2225 bool is_lp)
801bcfff 2226{
cca32e9a
PZ
2227 uint32_t method1, method2;
2228
c35426d2 2229 if (!params->active || !params->pri.enabled)
801bcfff
PZ
2230 return 0;
2231
23297044 2232 method1 = ilk_wm_method1(params->pixel_rate,
c35426d2 2233 params->pri.bytes_per_pixel,
cca32e9a
PZ
2234 mem_value);
2235
2236 if (!is_lp)
2237 return method1;
2238
23297044 2239 method2 = ilk_wm_method2(params->pixel_rate,
cca32e9a 2240 params->pipe_htotal,
c35426d2
VS
2241 params->pri.horiz_pixels,
2242 params->pri.bytes_per_pixel,
cca32e9a
PZ
2243 mem_value);
2244
2245 return min(method1, method2);
801bcfff
PZ
2246}
2247
37126462
VS
2248/*
2249 * For both WM_PIPE and WM_LP.
2250 * mem_value must be in 0.1us units.
2251 */
ac830fe1 2252static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
801bcfff
PZ
2253 uint32_t mem_value)
2254{
2255 uint32_t method1, method2;
2256
c35426d2 2257 if (!params->active || !params->spr.enabled)
801bcfff
PZ
2258 return 0;
2259
23297044 2260 method1 = ilk_wm_method1(params->pixel_rate,
c35426d2 2261 params->spr.bytes_per_pixel,
801bcfff 2262 mem_value);
23297044 2263 method2 = ilk_wm_method2(params->pixel_rate,
801bcfff 2264 params->pipe_htotal,
c35426d2
VS
2265 params->spr.horiz_pixels,
2266 params->spr.bytes_per_pixel,
801bcfff
PZ
2267 mem_value);
2268 return min(method1, method2);
2269}
2270
37126462
VS
2271/*
2272 * For both WM_PIPE and WM_LP.
2273 * mem_value must be in 0.1us units.
2274 */
ac830fe1 2275static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
801bcfff
PZ
2276 uint32_t mem_value)
2277{
c35426d2 2278 if (!params->active || !params->cur.enabled)
801bcfff
PZ
2279 return 0;
2280
23297044 2281 return ilk_wm_method2(params->pixel_rate,
801bcfff 2282 params->pipe_htotal,
c35426d2
VS
2283 params->cur.horiz_pixels,
2284 params->cur.bytes_per_pixel,
801bcfff
PZ
2285 mem_value);
2286}
2287
cca32e9a 2288/* Only for WM_LP. */
ac830fe1 2289static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
1fda9882 2290 uint32_t pri_val)
cca32e9a 2291{
c35426d2 2292 if (!params->active || !params->pri.enabled)
cca32e9a
PZ
2293 return 0;
2294
23297044 2295 return ilk_wm_fbc(pri_val,
c35426d2
VS
2296 params->pri.horiz_pixels,
2297 params->pri.bytes_per_pixel);
cca32e9a
PZ
2298}
2299
158ae64f
VS
2300static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2301{
2302 if (INTEL_INFO(dev)->gen >= 7)
2303 return 768;
2304 else
2305 return 512;
2306}
2307
2308/* Calculate the maximum primary/sprite plane watermark */
2309static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2310 int level,
240264f4 2311 const struct intel_wm_config *config,
158ae64f
VS
2312 enum intel_ddb_partitioning ddb_partitioning,
2313 bool is_sprite)
2314{
2315 unsigned int fifo_size = ilk_display_fifo_size(dev);
2316 unsigned int max;
2317
2318 /* if sprites aren't enabled, sprites get nothing */
240264f4 2319 if (is_sprite && !config->sprites_enabled)
158ae64f
VS
2320 return 0;
2321
2322 /* HSW allows LP1+ watermarks even with multiple pipes */
240264f4 2323 if (level == 0 || config->num_pipes_active > 1) {
158ae64f
VS
2324 fifo_size /= INTEL_INFO(dev)->num_pipes;
2325
2326 /*
2327		 * For some reason the non-self-refresh
2328		 * FIFO size is only half of the
2329		 * self-refresh FIFO size on ILK/SNB.
2330 */
2331 if (INTEL_INFO(dev)->gen <= 6)
2332 fifo_size /= 2;
2333 }
2334
240264f4 2335 if (config->sprites_enabled) {
158ae64f
VS
2336 /* level 0 is always calculated with 1:1 split */
2337 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2338 if (is_sprite)
2339 fifo_size *= 5;
2340 fifo_size /= 6;
2341 } else {
2342 fifo_size /= 2;
2343 }
2344 }
2345
2346 /* clamp to max that the registers can hold */
2347 if (INTEL_INFO(dev)->gen >= 7)
2348 /* IVB/HSW primary/sprite plane watermarks */
2349 max = level == 0 ? 127 : 1023;
2350 else if (!is_sprite)
2351 /* ILK/SNB primary plane watermarks */
2352 max = level == 0 ? 127 : 511;
2353 else
2354 /* ILK/SNB sprite plane watermarks */
2355 max = level == 0 ? 63 : 255;
2356
2357 return min(fifo_size, max);
2358}
2359
2360/* Calculate the maximum cursor plane watermark */
2361static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
240264f4
VS
2362 int level,
2363 const struct intel_wm_config *config)
158ae64f
VS
2364{
2365 /* HSW LP1+ watermarks w/ multiple pipes */
240264f4 2366 if (level > 0 && config->num_pipes_active > 1)
158ae64f
VS
2367 return 64;
2368
2369 /* otherwise just report max that registers can hold */
2370 if (INTEL_INFO(dev)->gen >= 7)
2371 return level == 0 ? 63 : 255;
2372 else
2373 return level == 0 ? 31 : 63;
2374}
2375
2376/* Calculate the maximum FBC watermark */
2377static unsigned int ilk_fbc_wm_max(void)
2378{
2379 /* max that registers can hold */
2380 return 15;
2381}
2382
2383static void ilk_wm_max(struct drm_device *dev,
2384 int level,
240264f4 2385 const struct intel_wm_config *config,
158ae64f
VS
2386 enum intel_ddb_partitioning ddb_partitioning,
2387 struct hsw_wm_maximums *max)
2388{
240264f4
VS
2389 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2390 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2391 max->cur = ilk_cursor_wm_max(dev, level, config);
158ae64f
VS
2392 max->fbc = ilk_fbc_wm_max();
2393}
2394
a9786a11
VS
2395static bool ilk_check_wm(int level,
2396 const struct hsw_wm_maximums *max,
1fd527cc 2397 struct intel_wm_level *result)
a9786a11
VS
2398{
2399 bool ret;
2400
2401 /* already determined to be invalid? */
2402 if (!result->enable)
2403 return false;
2404
2405 result->enable = result->pri_val <= max->pri &&
2406 result->spr_val <= max->spr &&
2407 result->cur_val <= max->cur;
2408
2409 ret = result->enable;
2410
2411 /*
2412 * HACK until we can pre-compute everything,
2413 * and thus fail gracefully if LP0 watermarks
2414 * are exceeded...
2415 */
2416 if (level == 0 && !result->enable) {
2417 if (result->pri_val > max->pri)
2418 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2419 level, result->pri_val, max->pri);
2420 if (result->spr_val > max->spr)
2421 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2422 level, result->spr_val, max->spr);
2423 if (result->cur_val > max->cur)
2424 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2425 level, result->cur_val, max->cur);
2426
2427 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2428 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2429 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2430 result->enable = true;
2431 }
2432
2433 DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
2434
2435 return ret;
2436}
2437
6f5ddd17
VS
2438static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2439 int level,
ac830fe1 2440 const struct hsw_pipe_wm_parameters *p,
1fd527cc 2441 struct intel_wm_level *result)
6f5ddd17
VS
2442{
2443 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2444 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2445 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2446
2447 /* WM1+ latency values stored in 0.5us units */
2448 if (level > 0) {
2449 pri_latency *= 5;
2450 spr_latency *= 5;
2451 cur_latency *= 5;
2452 }
2453
2454 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2455 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2456 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2457 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2458 result->enable = true;
2459}
2460
801bcfff
PZ
2461static uint32_t
2462hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
1f8eeabf
ED
2463{
2464 struct drm_i915_private *dev_priv = dev->dev_private;
1011d8c4 2465 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1011d8c4 2466 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
85a02deb 2467 u32 linetime, ips_linetime;
1f8eeabf 2468
801bcfff
PZ
2469 if (!intel_crtc_active(crtc))
2470 return 0;
1011d8c4 2471
1f8eeabf
ED
2472	/* The watermark is computed based on how long it takes to fill a single
2473	 * row at the given clock rate, multiplied by 8.
2474	 */
85a02deb
PZ
2475 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2476 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2477 intel_ddi_get_cdclk_freq(dev_priv));
1f8eeabf 2478
801bcfff
PZ
2479 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2480 PIPE_WM_LINETIME_TIME(linetime);
1f8eeabf
ED
2481}
2482
12b134df
VS
2483static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2484{
2485 struct drm_i915_private *dev_priv = dev->dev_private;
2486
2487 if (IS_HASWELL(dev)) {
2488 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2489
2490 wm[0] = (sskpd >> 56) & 0xFF;
2491 if (wm[0] == 0)
2492 wm[0] = sskpd & 0xF;
e5d5019e
VS
2493 wm[1] = (sskpd >> 4) & 0xFF;
2494 wm[2] = (sskpd >> 12) & 0xFF;
2495 wm[3] = (sskpd >> 20) & 0x1FF;
2496 wm[4] = (sskpd >> 32) & 0x1FF;
63cf9a13
VS
2497 } else if (INTEL_INFO(dev)->gen >= 6) {
2498 uint32_t sskpd = I915_READ(MCH_SSKPD);
2499
2500 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2501 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2502 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2503 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
3a88d0ac
VS
2504 } else if (INTEL_INFO(dev)->gen >= 5) {
2505 uint32_t mltr = I915_READ(MLTR_ILK);
2506
2507 /* ILK primary LP0 latency is 700 ns */
2508 wm[0] = 7;
2509 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2510 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
12b134df
VS
2511 }
2512}
2513
53615a5e
VS
2514static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2515{
2516 /* ILK sprite LP0 latency is 1300 ns */
2517 if (INTEL_INFO(dev)->gen == 5)
2518 wm[0] = 13;
2519}
2520
2521static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2522{
2523 /* ILK cursor LP0 latency is 1300 ns */
2524 if (INTEL_INFO(dev)->gen == 5)
2525 wm[0] = 13;
2526
2527 /* WaDoubleCursorLP3Latency:ivb */
2528 if (IS_IVYBRIDGE(dev))
2529 wm[3] *= 2;
2530}
2531
ad0d6dc4 2532static int ilk_wm_max_level(const struct drm_device *dev)
26ec971e 2533{
26ec971e
VS
2534 /* how many WM levels are we expecting */
2535 if (IS_HASWELL(dev))
ad0d6dc4 2536 return 4;
26ec971e 2537 else if (INTEL_INFO(dev)->gen >= 6)
ad0d6dc4 2538 return 3;
26ec971e 2539 else
ad0d6dc4
VS
2540 return 2;
2541}
2542
2543static void intel_print_wm_latency(struct drm_device *dev,
2544 const char *name,
2545 const uint16_t wm[5])
2546{
2547 int level, max_level = ilk_wm_max_level(dev);
26ec971e
VS
2548
2549 for (level = 0; level <= max_level; level++) {
2550 unsigned int latency = wm[level];
2551
2552 if (latency == 0) {
2553 DRM_ERROR("%s WM%d latency not provided\n",
2554 name, level);
2555 continue;
2556 }
2557
2558 /* WM1+ latency values in 0.5us units */
2559 if (level > 0)
2560 latency *= 5;
2561
2562 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2563 name, level, wm[level],
2564 latency / 10, latency % 10);
2565 }
2566}
2567
53615a5e
VS
2568static void intel_setup_wm_latency(struct drm_device *dev)
2569{
2570 struct drm_i915_private *dev_priv = dev->dev_private;
2571
2572 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2573
2574 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2575 sizeof(dev_priv->wm.pri_latency));
2576 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2577 sizeof(dev_priv->wm.pri_latency));
2578
2579 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2580 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
26ec971e
VS
2581
2582 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2583 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2584 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
53615a5e
VS
2585}
2586
7c4a395f
VS
2587static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2588 struct hsw_pipe_wm_parameters *p,
861f3389
PZ
2589 struct hsw_wm_maximums *lp_max_1_2,
2590 struct hsw_wm_maximums *lp_max_5_6)
1011d8c4 2591{
7c4a395f
VS
2592 struct drm_device *dev = crtc->dev;
2593 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2594 enum pipe pipe = intel_crtc->pipe;
240264f4 2595 struct intel_wm_config config = {};
7c4a395f 2596 struct drm_plane *plane;
1011d8c4 2597
7c4a395f
VS
2598 p->active = intel_crtc_active(crtc);
2599 if (p->active) {
801bcfff 2600 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
3658729a 2601 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
c35426d2
VS
2602 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2603 p->cur.bytes_per_pixel = 4;
37327abd 2604 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
c35426d2
VS
2605 p->cur.horiz_pixels = 64;
2606 /* TODO: for now, assume primary and cursor planes are always enabled. */
2607 p->pri.enabled = true;
2608 p->cur.enabled = true;
801bcfff
PZ
2609 }
2610
7c4a395f
VS
2611 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2612 config.num_pipes_active += intel_crtc_active(crtc);
2613
801bcfff
PZ
2614 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2615 struct intel_plane *intel_plane = to_intel_plane(plane);
801bcfff 2616
7c4a395f
VS
2617 if (intel_plane->pipe == pipe)
2618 p->spr = intel_plane->wm;
cca32e9a 2619
7c4a395f
VS
2620 config.sprites_enabled |= intel_plane->wm.enabled;
2621 config.sprites_scaled |= intel_plane->wm.scaled;
cca32e9a
PZ
2622 }
2623
240264f4 2624 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
158ae64f
VS
2625
2626 /* 5/6 split only in single pipe config on IVB+ */
240264f4
VS
2627 if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
2628 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
158ae64f
VS
2629 else
2630 *lp_max_5_6 = *lp_max_1_2;
801bcfff
PZ
2631}
2632
0b2ae6d7
VS
2633/* Compute new watermarks for the pipe */
2634static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2635 const struct hsw_pipe_wm_parameters *params,
2636 struct intel_pipe_wm *pipe_wm)
2637{
2638 struct drm_device *dev = crtc->dev;
2639 struct drm_i915_private *dev_priv = dev->dev_private;
2640 int level, max_level = ilk_wm_max_level(dev);
2641 /* LP0 watermark maximums depend on this pipe alone */
2642 struct intel_wm_config config = {
2643 .num_pipes_active = 1,
2644 .sprites_enabled = params->spr.enabled,
2645 .sprites_scaled = params->spr.scaled,
2646 };
2647 struct hsw_wm_maximums max;
2648
0b2ae6d7
VS
2649 /* LP0 watermarks always use 1/2 DDB partitioning */
2650 ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2651
2652 for (level = 0; level <= max_level; level++)
2653 ilk_compute_wm_level(dev_priv, level, params,
2654 &pipe_wm->wm[level]);
2655
2656 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2657
2658 /* At least LP0 must be valid */
2659 return ilk_check_wm(0, &max, &pipe_wm->wm[0]);
2660}
2661
2662/*
2663 * Merge the watermarks from all active pipes for a specific level.
2664 */
2665static void ilk_merge_wm_level(struct drm_device *dev,
2666 int level,
2667 struct intel_wm_level *ret_wm)
2668{
2669 const struct intel_crtc *intel_crtc;
2670
2671 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2672 const struct intel_wm_level *wm =
2673 &intel_crtc->wm.active.wm[level];
2674
2675 if (!wm->enable)
2676 return;
2677
2678 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2679 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2680 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2681 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2682 }
2683
2684 ret_wm->enable = true;
2685}
2686
2687/*
2688 * Merge all low power watermarks for all active pipes.
2689 */
2690static void ilk_wm_merge(struct drm_device *dev,
2691 const struct hsw_wm_maximums *max,
2692 struct intel_pipe_wm *merged)
2693{
2694 int level, max_level = ilk_wm_max_level(dev);
2695
2696 merged->fbc_wm_enabled = true;
2697
2698 /* merge each WM1+ level */
2699 for (level = 1; level <= max_level; level++) {
2700 struct intel_wm_level *wm = &merged->wm[level];
2701
2702 ilk_merge_wm_level(dev, level, wm);
2703
2704 if (!ilk_check_wm(level, max, wm))
2705 break;
2706
2707 /*
2708 * The spec says it is preferred to disable
2709 * FBC WMs instead of disabling a WM level.
2710 */
2711 if (wm->fbc_val > max->fbc) {
2712 merged->fbc_wm_enabled = false;
2713 wm->fbc_val = 0;
2714 }
2715 }
2716}
2717
801bcfff 2718static void hsw_compute_wm_results(struct drm_device *dev,
ac830fe1 2719 const struct hsw_wm_maximums *lp_maximums,
801bcfff
PZ
2720 struct hsw_wm_values *results)
2721{
0b2ae6d7
VS
2722 struct intel_crtc *intel_crtc;
2723 int level, wm_lp;
2724 struct intel_pipe_wm merged = {};
cca32e9a 2725
0b2ae6d7 2726 ilk_wm_merge(dev, lp_maximums, &merged);
cca32e9a 2727
5c536613
VS
2728 memset(results, 0, sizeof(*results));
2729
0b2ae6d7 2730 results->enable_fbc_wm = merged.fbc_wm_enabled;
cca32e9a 2731
0b2ae6d7 2732 /* LP1+ register values */
cca32e9a 2733 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
1fd527cc 2734 const struct intel_wm_level *r;
801bcfff 2735
0b2ae6d7
VS
2736 level = wm_lp + (wm_lp >= 2 && merged.wm[4].enable);
2737
2738 r = &merged.wm[level];
2739 if (!r->enable)
cca32e9a
PZ
2740 break;
2741
cca32e9a
PZ
2742 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2743 r->fbc_val,
2744 r->pri_val,
2745 r->cur_val);
2746 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2747 }
801bcfff 2748
0b2ae6d7
VS
2749 /* LP0 register values */
2750 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2751 enum pipe pipe = intel_crtc->pipe;
2752 const struct intel_wm_level *r =
2753 &intel_crtc->wm.active.wm[0];
2754
2755 if (WARN_ON(!r->enable))
2756 continue;
2757
2758 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
1011d8c4 2759
0b2ae6d7
VS
2760 results->wm_pipe[pipe] =
2761 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2762 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2763 r->cur_val;
801bcfff
PZ
2764 }
2765}
2766
861f3389
PZ
2767/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2768 * case both are at the same level. Prefer r1 in case they're the same. */
f4db9321
DL
2769static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2770 struct hsw_wm_values *r2)
861f3389
PZ
2771{
2772 int i, val_r1 = 0, val_r2 = 0;
2773
2774 for (i = 0; i < 3; i++) {
2775 if (r1->wm_lp[i] & WM3_LP_EN)
2776 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2777 if (r2->wm_lp[i] & WM3_LP_EN)
2778 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2779 }
2780
2781 if (val_r1 == val_r2) {
2782 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2783 return r2;
2784 else
2785 return r1;
2786 } else if (val_r1 > val_r2) {
2787 return r1;
2788 } else {
2789 return r2;
2790 }
2791}
2792
801bcfff
PZ
2793/*
2794 * The spec says we shouldn't write when we don't need to, because every write
2795 * causes WMs to be re-evaluated, expending some power.
2796 */
2797static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2798 struct hsw_wm_values *results,
77c122bc 2799 enum intel_ddb_partitioning partitioning)
801bcfff
PZ
2800{
2801 struct hsw_wm_values previous;
2802 uint32_t val;
77c122bc 2803 enum intel_ddb_partitioning prev_partitioning;
cca32e9a 2804 bool prev_enable_fbc_wm;
801bcfff
PZ
2805
2806 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2807 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2808 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2809 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2810 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2811 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2812 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2813 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2814 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2815 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2816 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2817 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2818
2819 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
77c122bc 2820 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
801bcfff 2821
cca32e9a
PZ
2822 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2823
801bcfff
PZ
2824 if (memcmp(results->wm_pipe, previous.wm_pipe,
2825 sizeof(results->wm_pipe)) == 0 &&
2826 memcmp(results->wm_lp, previous.wm_lp,
2827 sizeof(results->wm_lp)) == 0 &&
2828 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2829 sizeof(results->wm_lp_spr)) == 0 &&
2830 memcmp(results->wm_linetime, previous.wm_linetime,
2831 sizeof(results->wm_linetime)) == 0 &&
cca32e9a
PZ
2832 partitioning == prev_partitioning &&
2833 results->enable_fbc_wm == prev_enable_fbc_wm)
801bcfff
PZ
2834 return;
2835
2836 if (previous.wm_lp[2] != 0)
2837 I915_WRITE(WM3_LP_ILK, 0);
2838 if (previous.wm_lp[1] != 0)
2839 I915_WRITE(WM2_LP_ILK, 0);
2840 if (previous.wm_lp[0] != 0)
2841 I915_WRITE(WM1_LP_ILK, 0);
2842
2843 if (previous.wm_pipe[0] != results->wm_pipe[0])
2844 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2845 if (previous.wm_pipe[1] != results->wm_pipe[1])
2846 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2847 if (previous.wm_pipe[2] != results->wm_pipe[2])
2848 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2849
2850 if (previous.wm_linetime[0] != results->wm_linetime[0])
2851 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2852 if (previous.wm_linetime[1] != results->wm_linetime[1])
2853 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2854 if (previous.wm_linetime[2] != results->wm_linetime[2])
2855 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2856
2857 if (prev_partitioning != partitioning) {
2858 val = I915_READ(WM_MISC);
77c122bc 2859 if (partitioning == INTEL_DDB_PART_1_2)
801bcfff
PZ
2860 val &= ~WM_MISC_DATA_PARTITION_5_6;
2861 else
2862 val |= WM_MISC_DATA_PARTITION_5_6;
2863 I915_WRITE(WM_MISC, val);
1011d8c4
PZ
2864 }
2865
cca32e9a
PZ
2866 if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2867 val = I915_READ(DISP_ARB_CTL);
2868 if (results->enable_fbc_wm)
2869 val &= ~DISP_FBC_WM_DIS;
2870 else
2871 val |= DISP_FBC_WM_DIS;
2872 I915_WRITE(DISP_ARB_CTL, val);
2873 }
2874
801bcfff
PZ
2875 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2876 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2877 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2878 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2879 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2880 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2881
2882 if (results->wm_lp[0] != 0)
2883 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2884 if (results->wm_lp[1] != 0)
2885 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2886 if (results->wm_lp[2] != 0)
2887 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2888}
2889
46ba614c 2890static void haswell_update_wm(struct drm_crtc *crtc)
801bcfff 2891{
7c4a395f 2892 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
46ba614c 2893 struct drm_device *dev = crtc->dev;
801bcfff 2894 struct drm_i915_private *dev_priv = dev->dev_private;
861f3389 2895 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
7c4a395f 2896 struct hsw_pipe_wm_parameters params = {};
861f3389 2897 struct hsw_wm_values results_1_2, results_5_6, *best_results;
77c122bc 2898 enum intel_ddb_partitioning partitioning;
7c4a395f
VS
2899 struct intel_pipe_wm pipe_wm = {};
2900
2901 hsw_compute_wm_parameters(crtc, &params, &lp_max_1_2, &lp_max_5_6);
2902
2903 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2904
2905 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2906 return;
861f3389 2907
7c4a395f 2908 intel_crtc->wm.active = pipe_wm;
861f3389 2909
7c4a395f 2910 hsw_compute_wm_results(dev, &lp_max_1_2, &results_1_2);
861f3389 2911 if (lp_max_1_2.pri != lp_max_5_6.pri) {
7c4a395f 2912 hsw_compute_wm_results(dev, &lp_max_5_6, &results_5_6);
861f3389
PZ
2913 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2914 } else {
2915 best_results = &results_1_2;
2916 }
2917
2918 partitioning = (best_results == &results_1_2) ?
77c122bc 2919 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
801bcfff 2920
861f3389 2921 hsw_write_wm_values(dev_priv, best_results, partitioning);
1011d8c4
PZ
2922}
2923
adf3d35e
VS
2924static void haswell_update_sprite_wm(struct drm_plane *plane,
2925 struct drm_crtc *crtc,
526682e9 2926 uint32_t sprite_width, int pixel_size,
bdd57d03 2927 bool enabled, bool scaled)
526682e9 2928{
adf3d35e 2929 struct intel_plane *intel_plane = to_intel_plane(plane);
526682e9 2930
adf3d35e
VS
2931 intel_plane->wm.enabled = enabled;
2932 intel_plane->wm.scaled = scaled;
2933 intel_plane->wm.horiz_pixels = sprite_width;
2934 intel_plane->wm.bytes_per_pixel = pixel_size;
526682e9 2935
46ba614c 2936 haswell_update_wm(crtc);
526682e9
PZ
2937}
2938
b445e3b0
ED
2939static bool
2940sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2941 uint32_t sprite_width, int pixel_size,
2942 const struct intel_watermark_params *display,
2943 int display_latency_ns, int *sprite_wm)
2944{
2945 struct drm_crtc *crtc;
2946 int clock;
2947 int entries, tlb_miss;
2948
2949 crtc = intel_get_crtc_for_plane(dev, plane);
3490ea5d 2950 if (!intel_crtc_active(crtc)) {
b445e3b0
ED
2951 *sprite_wm = display->guard_size;
2952 return false;
2953 }
2954
241bfc38 2955 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
b445e3b0
ED
2956
2957 /* Use the small buffer method to calculate the sprite watermark */
2958 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
2959 tlb_miss = display->fifo_size*display->cacheline_size -
2960 sprite_width * 8;
2961 if (tlb_miss > 0)
2962 entries += tlb_miss;
2963 entries = DIV_ROUND_UP(entries, display->cacheline_size);
2964 *sprite_wm = entries + display->guard_size;
2965 if (*sprite_wm > (int)display->max_wm)
2966 *sprite_wm = display->max_wm;
2967
2968 return true;
2969}
2970
2971static bool
2972sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2973 uint32_t sprite_width, int pixel_size,
2974 const struct intel_watermark_params *display,
2975 int latency_ns, int *sprite_wm)
2976{
2977 struct drm_crtc *crtc;
2978 unsigned long line_time_us;
2979 int clock;
2980 int line_count, line_size;
2981 int small, large;
2982 int entries;
2983
2984 if (!latency_ns) {
2985 *sprite_wm = 0;
2986 return false;
2987 }
2988
2989 crtc = intel_get_crtc_for_plane(dev, plane);
241bfc38 2990 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
b445e3b0
ED
2991 if (!clock) {
2992 *sprite_wm = 0;
2993 return false;
2994 }
2995
2996 line_time_us = (sprite_width * 1000) / clock;
2997 if (!line_time_us) {
2998 *sprite_wm = 0;
2999 return false;
3000 }
3001
3002 line_count = (latency_ns / line_time_us + 1000) / 1000;
3003 line_size = sprite_width * pixel_size;
3004
3005	/* Use the minimum of the small and large buffer methods for the sprite */
3006 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3007 large = line_count * line_size;
3008
3009 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3010 *sprite_wm = entries + display->guard_size;
3011
3012 return *sprite_wm > 0x3ff ? false : true;
3013}
3014
adf3d35e
VS
3015static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3016 struct drm_crtc *crtc,
4c4ff43a 3017 uint32_t sprite_width, int pixel_size,
39db4a4d 3018 bool enabled, bool scaled)
b445e3b0 3019{
adf3d35e 3020 struct drm_device *dev = plane->dev;
b445e3b0 3021 struct drm_i915_private *dev_priv = dev->dev_private;
adf3d35e 3022 int pipe = to_intel_plane(plane)->pipe;
b0aea5dc 3023 int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
b445e3b0
ED
3024 u32 val;
3025 int sprite_wm, reg;
3026 int ret;
3027
39db4a4d 3028 if (!enabled)
4c4ff43a
PZ
3029 return;
3030
b445e3b0
ED
3031 switch (pipe) {
3032 case 0:
3033 reg = WM0_PIPEA_ILK;
3034 break;
3035 case 1:
3036 reg = WM0_PIPEB_ILK;
3037 break;
3038 case 2:
3039 reg = WM0_PIPEC_IVB;
3040 break;
3041 default:
3042 return; /* bad pipe */
3043 }
3044
3045 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
3046 &sandybridge_display_wm_info,
3047 latency, &sprite_wm);
3048 if (!ret) {
84f44ce7
VS
3049 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
3050 pipe_name(pipe));
b445e3b0
ED
3051 return;
3052 }
3053
3054 val = I915_READ(reg);
3055 val &= ~WM0_PIPE_SPRITE_MASK;
3056 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
84f44ce7 3057 DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
b445e3b0
ED
3058
3059
3060 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3061 pixel_size,
3062 &sandybridge_display_srwm_info,
b0aea5dc 3063 dev_priv->wm.spr_latency[1] * 500,
b445e3b0
ED
3064 &sprite_wm);
3065 if (!ret) {
84f44ce7
VS
3066 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
3067 pipe_name(pipe));
b445e3b0
ED
3068 return;
3069 }
3070 I915_WRITE(WM1S_LP_ILK, sprite_wm);
3071
3072 /* Only IVB has two more LP watermarks for sprite */
3073 if (!IS_IVYBRIDGE(dev))
3074 return;
3075
3076 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3077 pixel_size,
3078 &sandybridge_display_srwm_info,
b0aea5dc 3079 dev_priv->wm.spr_latency[2] * 500,
b445e3b0
ED
3080 &sprite_wm);
3081 if (!ret) {
84f44ce7
VS
3082 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
3083 pipe_name(pipe));
b445e3b0
ED
3084 return;
3085 }
3086 I915_WRITE(WM2S_LP_IVB, sprite_wm);
3087
3088 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3089 pixel_size,
3090 &sandybridge_display_srwm_info,
b0aea5dc 3091 dev_priv->wm.spr_latency[3] * 500,
b445e3b0
ED
3092 &sprite_wm);
3093 if (!ret) {
84f44ce7
VS
3094 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
3095 pipe_name(pipe));
b445e3b0
ED
3096 return;
3097 }
3098 I915_WRITE(WM3S_LP_IVB, sprite_wm);
3099}
3100
3101/**
3102 * intel_update_watermarks - update FIFO watermark values based on current modes
3103 *
3104 * Calculate watermark values for the various WM regs based on current mode
3105 * and plane configuration.
3106 *
3107 * There are several cases to deal with here:
3108 * - normal (i.e. non-self-refresh)
3109 * - self-refresh (SR) mode
3110 * - lines are large relative to FIFO size (buffer can hold up to 2)
3111 * - lines are small relative to FIFO size (buffer can hold more than 2
3112 * lines), so need to account for TLB latency
3113 *
3114 * The normal calculation is:
3115 * watermark = dotclock * bytes per pixel * latency
3116 * where latency is platform & configuration dependent (we assume pessimal
3117 * values here).
3118 *
3119 * The SR calculation is:
3120 * watermark = (trunc(latency/line time)+1) * surface width *
3121 * bytes per pixel
3122 * where
3123 * line time = htotal / dotclock
3124 * surface width = hdisplay for normal plane and 64 for cursor
3125 * and latency is assumed to be high, as above.
3126 *
3127 * The final value programmed to the register should always be rounded up,
3128 * and include an extra 2 entries to account for clock crossings.
3129 *
3130 * We don't use the sprite, so we can ignore that. And on Crestline we have
3131 * to set the non-SR watermarks to 8.
3132 */
46ba614c 3133void intel_update_watermarks(struct drm_crtc *crtc)
b445e3b0 3134{
46ba614c 3135 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
b445e3b0
ED
3136
3137 if (dev_priv->display.update_wm)
46ba614c 3138 dev_priv->display.update_wm(crtc);
b445e3b0
ED
3139}
3140
adf3d35e
VS
3141void intel_update_sprite_watermarks(struct drm_plane *plane,
3142 struct drm_crtc *crtc,
4c4ff43a 3143 uint32_t sprite_width, int pixel_size,
39db4a4d 3144 bool enabled, bool scaled)
b445e3b0 3145{
adf3d35e 3146 struct drm_i915_private *dev_priv = plane->dev->dev_private;
b445e3b0
ED
3147
3148 if (dev_priv->display.update_sprite_wm)
adf3d35e 3149 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
39db4a4d 3150 pixel_size, enabled, scaled);
b445e3b0
ED
3151}
3152
2b4e57bd
ED
3153static struct drm_i915_gem_object *
3154intel_alloc_context_page(struct drm_device *dev)
3155{
3156 struct drm_i915_gem_object *ctx;
3157 int ret;
3158
3159 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3160
3161 ctx = i915_gem_alloc_object(dev, 4096);
3162 if (!ctx) {
3163 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3164 return NULL;
3165 }
3166
c37e2204 3167 ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
2b4e57bd
ED
3168 if (ret) {
3169 DRM_ERROR("failed to pin power context: %d\n", ret);
3170 goto err_unref;
3171 }
3172
3173 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3174 if (ret) {
3175 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3176 goto err_unpin;
3177 }
3178
3179 return ctx;
3180
3181err_unpin:
3182 i915_gem_object_unpin(ctx);
3183err_unref:
3184 drm_gem_object_unreference(&ctx->base);
2b4e57bd
ED
3185 return NULL;
3186}
3187
9270388e
DV
3188/**
3189 * Lock protecting IPS related data structures
9270388e
DV
3190 */
3191DEFINE_SPINLOCK(mchdev_lock);
3192
3193/* Global for IPS driver to get at the current i915 device. Protected by
3194 * mchdev_lock. */
3195static struct drm_i915_private *i915_mch_dev;
3196
2b4e57bd
ED
3197bool ironlake_set_drps(struct drm_device *dev, u8 val)
3198{
3199 struct drm_i915_private *dev_priv = dev->dev_private;
3200 u16 rgvswctl;
3201
9270388e
DV
3202 assert_spin_locked(&mchdev_lock);
3203
2b4e57bd
ED
3204 rgvswctl = I915_READ16(MEMSWCTL);
3205 if (rgvswctl & MEMCTL_CMD_STS) {
3206 DRM_DEBUG("gpu busy, RCS change rejected\n");
3207 return false; /* still busy with another command */
3208 }
3209
3210 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3211 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3212 I915_WRITE16(MEMSWCTL, rgvswctl);
3213 POSTING_READ16(MEMSWCTL);
3214
3215 rgvswctl |= MEMCTL_CMD_STS;
3216 I915_WRITE16(MEMSWCTL, rgvswctl);
3217
3218 return true;
3219}
3220
8090c6b9 3221static void ironlake_enable_drps(struct drm_device *dev)
2b4e57bd
ED
3222{
3223 struct drm_i915_private *dev_priv = dev->dev_private;
3224 u32 rgvmodectl = I915_READ(MEMMODECTL);
3225 u8 fmax, fmin, fstart, vstart;
3226
9270388e
DV
3227 spin_lock_irq(&mchdev_lock);
3228
2b4e57bd
ED
3229 /* Enable temp reporting */
3230 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3231 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3232
3233 /* 100ms RC evaluation intervals */
3234 I915_WRITE(RCUPEI, 100000);
3235 I915_WRITE(RCDNEI, 100000);
3236
3237 /* Set max/min thresholds to 90ms and 80ms respectively */
3238 I915_WRITE(RCBMAXAVG, 90000);
3239 I915_WRITE(RCBMINAVG, 80000);
3240
3241 I915_WRITE(MEMIHYST, 1);
3242
3243 /* Set up min, max, and cur for interrupt handling */
3244 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3245 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3246 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3247 MEMMODE_FSTART_SHIFT;
3248
3249 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3250 PXVFREQ_PX_SHIFT;
3251
20e4d407
DV
3252 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3253 dev_priv->ips.fstart = fstart;
2b4e57bd 3254
20e4d407
DV
3255 dev_priv->ips.max_delay = fstart;
3256 dev_priv->ips.min_delay = fmin;
3257 dev_priv->ips.cur_delay = fstart;
2b4e57bd
ED
3258
3259 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3260 fmax, fmin, fstart);
3261
3262 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3263
3264 /*
3265 * Interrupts will be enabled in ironlake_irq_postinstall
3266 */
3267
3268 I915_WRITE(VIDSTART, vstart);
3269 POSTING_READ(VIDSTART);
3270
3271 rgvmodectl |= MEMMODE_SWMODE_EN;
3272 I915_WRITE(MEMMODECTL, rgvmodectl);
3273
9270388e 3274 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2b4e57bd 3275 DRM_ERROR("stuck trying to change perf mode\n");
9270388e 3276 mdelay(1);
2b4e57bd
ED
3277
3278 ironlake_set_drps(dev, fstart);
3279
20e4d407 3280 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2b4e57bd 3281 I915_READ(0x112e0);
20e4d407
DV
3282 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3283 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3284 getrawmonotonic(&dev_priv->ips.last_time2);
9270388e
DV
3285
3286 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
3287}
3288
8090c6b9 3289static void ironlake_disable_drps(struct drm_device *dev)
2b4e57bd
ED
3290{
3291 struct drm_i915_private *dev_priv = dev->dev_private;
9270388e
DV
3292 u16 rgvswctl;
3293
3294 spin_lock_irq(&mchdev_lock);
3295
3296 rgvswctl = I915_READ16(MEMSWCTL);
2b4e57bd
ED
3297
3298 /* Ack interrupts, disable EFC interrupt */
3299 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3300 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3301 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3302 I915_WRITE(DEIIR, DE_PCU_EVENT);
3303 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3304
3305 /* Go back to the starting frequency */
20e4d407 3306 ironlake_set_drps(dev, dev_priv->ips.fstart);
9270388e 3307 mdelay(1);
2b4e57bd
ED
3308 rgvswctl |= MEMCTL_CMD_STS;
3309 I915_WRITE(MEMSWCTL, rgvswctl);
9270388e 3310 mdelay(1);
2b4e57bd 3311
9270388e 3312 spin_unlock_irq(&mchdev_lock);
2b4e57bd
ED
3313}
3314
acbe9475
DV
3315/* There's a funny hw issue where the hw returns all 0 when reading from
3316 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3317 * ourselves, instead of doing an rmw cycle (which might result in us clearing
3318 * all limits and the gpu getting stuck at whatever frequency it is currently at).
3319 */
65bccb5c 3320static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2b4e57bd 3321{
7b9e0ae6 3322 u32 limits;
2b4e57bd 3323
7b9e0ae6 3324 limits = 0;
c6a828d3
DV
3325
3326 if (*val >= dev_priv->rps.max_delay)
3327 *val = dev_priv->rps.max_delay;
3328 limits |= dev_priv->rps.max_delay << 24;
20b46e59
DV
3329
3330 /* Only set the down limit when we've reached the lowest level to avoid
3331 * getting more interrupts, otherwise leave this clear. This prevents a
3332 * race in the hw when coming out of rc6: There's a tiny window where
3333 * the hw runs at the minimal clock before selecting the desired
3334 * frequency; if the down threshold expires in that window we will not
3335 * receive a down interrupt. */
c6a828d3
DV
3336 if (*val <= dev_priv->rps.min_delay) {
3337 *val = dev_priv->rps.min_delay;
3338 limits |= dev_priv->rps.min_delay << 16;
20b46e59
DV
3339 }
3340
3341 return limits;
3342}
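/*
 * Editorial sketch (not part of the driver): gen6_rps_limits() above packs
 * the clamped software limits into the layout GEN6_RP_INTERRUPT_LIMITS
 * expects, the max delay in bits 31:24 and the min delay in bits 23:16.
 * With hypothetical values max_delay = 0x20 and min_delay = 0x0b, and a
 * request already clamped to the floor, the computed word would be:
 *
 *	limits = (0x20 << 24) | (0x0b << 16) = 0x200b0000
 */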
3343
dd75fdc8
CW
3344static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3345{
3346 int new_power;
3347
3348 new_power = dev_priv->rps.power;
3349 switch (dev_priv->rps.power) {
3350 case LOW_POWER:
3351 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3352 new_power = BETWEEN;
3353 break;
3354
3355 case BETWEEN:
3356 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3357 new_power = LOW_POWER;
3358 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3359 new_power = HIGH_POWER;
3360 break;
3361
3362 case HIGH_POWER:
3363 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3364 new_power = BETWEEN;
3365 break;
3366 }
3367 /* Max/min bins are special */
3368 if (val == dev_priv->rps.min_delay)
3369 new_power = LOW_POWER;
3370 if (val == dev_priv->rps.max_delay)
3371 new_power = HIGH_POWER;
3372 if (new_power == dev_priv->rps.power)
3373 return;
3374
3375 /* Note the units here are not exactly 1us, but 1280ns. */
3376 switch (new_power) {
3377 case LOW_POWER:
3378 /* Upclock if more than 95% busy over 16ms */
3379 I915_WRITE(GEN6_RP_UP_EI, 12500);
3380 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3381
3382 /* Downclock if less than 85% busy over 32ms */
3383 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3384 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3385
3386 I915_WRITE(GEN6_RP_CONTROL,
3387 GEN6_RP_MEDIA_TURBO |
3388 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3389 GEN6_RP_MEDIA_IS_GFX |
3390 GEN6_RP_ENABLE |
3391 GEN6_RP_UP_BUSY_AVG |
3392 GEN6_RP_DOWN_IDLE_AVG);
3393 break;
3394
3395 case BETWEEN:
3396 /* Upclock if more than 90% busy over 13ms */
3397 I915_WRITE(GEN6_RP_UP_EI, 10250);
3398 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3399
3400 /* Downclock if less than 75% busy over 32ms */
3401 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3402 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3403
3404 I915_WRITE(GEN6_RP_CONTROL,
3405 GEN6_RP_MEDIA_TURBO |
3406 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3407 GEN6_RP_MEDIA_IS_GFX |
3408 GEN6_RP_ENABLE |
3409 GEN6_RP_UP_BUSY_AVG |
3410 GEN6_RP_DOWN_IDLE_AVG);
3411 break;
3412
3413 case HIGH_POWER:
3414 /* Upclock if more than 85% busy over 10ms */
3415 I915_WRITE(GEN6_RP_UP_EI, 8000);
3416 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3417
3418 /* Downclock if less than 60% busy over 32ms */
3419 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3420 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3421
3422 I915_WRITE(GEN6_RP_CONTROL,
3423 GEN6_RP_MEDIA_TURBO |
3424 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3425 GEN6_RP_MEDIA_IS_GFX |
3426 GEN6_RP_ENABLE |
3427 GEN6_RP_UP_BUSY_AVG |
3428 GEN6_RP_DOWN_IDLE_AVG);
3429 break;
3430 }
3431
3432 dev_priv->rps.power = new_power;
3433 dev_priv->rps.last_adj = 0;
3434}
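/*
 * Editorial sketch (not part of the driver): the RP_UP/DOWN_EI and
 * *_THRESHOLD registers programmed above count in 1.28 us (1280 ns) units,
 * so GEN6_RP_UP_EI = 12500 is roughly a 16 ms window and a threshold of
 * 11800 / 12500 corresponds to ~95% busy.  A hypothetical helper for
 * converting a window in microseconds into register units could read:
 */
static inline u32 rp_ei_us_to_units(u32 us)
{
	/* one register unit is 1280 ns, i.e. 1.28 us */
	return us * 100 / 128;
}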
3435
20b46e59
DV
3436void gen6_set_rps(struct drm_device *dev, u8 val)
3437{
3438 struct drm_i915_private *dev_priv = dev->dev_private;
65bccb5c 3439 u32 limits = gen6_rps_limits(dev_priv, &val);
7b9e0ae6 3440
4fc688ce 3441 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79249636
BW
3442 WARN_ON(val > dev_priv->rps.max_delay);
3443 WARN_ON(val < dev_priv->rps.min_delay);
004777cb 3444
c6a828d3 3445 if (val == dev_priv->rps.cur_delay)
7b9e0ae6
CW
3446 return;
3447
dd75fdc8
CW
3448 gen6_set_rps_thresholds(dev_priv, val);
3449
92bd1bf0
RV
3450 if (IS_HASWELL(dev))
3451 I915_WRITE(GEN6_RPNSWREQ,
3452 HSW_FREQUENCY(val));
3453 else
3454 I915_WRITE(GEN6_RPNSWREQ,
3455 GEN6_FREQUENCY(val) |
3456 GEN6_OFFSET(0) |
3457 GEN6_AGGRESSIVE_TURBO);
7b9e0ae6
CW
3458
3459 /* Make sure we continue to get interrupts
3460 * until we hit the minimum or maximum frequencies.
3461 */
3462 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3463
d5570a72
BW
3464 POSTING_READ(GEN6_RPNSWREQ);
3465
c6a828d3 3466 dev_priv->rps.cur_delay = val;
be2cde9a
DV
3467
3468 trace_intel_gpu_freq_change(val * 50);
2b4e57bd
ED
3469}
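/*
 * Editorial note (not part of the driver): the "delay" values handled by
 * gen6_set_rps() are in 50 MHz units, which is why the tracepoint above
 * reports val * 50.  For a hypothetical val of 22 the requested GPU
 * frequency is therefore 22 * 50 = 1100 MHz.
 */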
3470
b29c19b6
CW
3471void gen6_rps_idle(struct drm_i915_private *dev_priv)
3472{
3473 mutex_lock(&dev_priv->rps.hw_lock);
c0951f0c
CW
3474 if (dev_priv->rps.enabled) {
3475 if (dev_priv->info->is_valleyview)
3476 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3477 else
3478 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3479 dev_priv->rps.last_adj = 0;
3480 }
b29c19b6
CW
3481 mutex_unlock(&dev_priv->rps.hw_lock);
3482}
3483
3484void gen6_rps_boost(struct drm_i915_private *dev_priv)
3485{
3486 mutex_lock(&dev_priv->rps.hw_lock);
c0951f0c
CW
3487 if (dev_priv->rps.enabled) {
3488 if (dev_priv->info->is_valleyview)
3489 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3490 else
3491 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3492 dev_priv->rps.last_adj = 0;
3493 }
b29c19b6
CW
3494 mutex_unlock(&dev_priv->rps.hw_lock);
3495}
3496
80814ae4
VS
3497/*
3498 * Wait until the previous freq change has completed,
3499 * or the timeout elapsed, and then update our notion
3500 * of the current GPU frequency.
3501 */
3502static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3503{
80814ae4
VS
3504 u32 pval;
3505
3506 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3507
e8474409
VS
3508 if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
3509 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
80814ae4
VS
3510
3511 pval >>= 8;
3512
3513 if (pval != dev_priv->rps.cur_delay)
3514 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
3515 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3516 dev_priv->rps.cur_delay,
3517 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3518
3519 dev_priv->rps.cur_delay = pval;
3520}
3521
0a073b84
JB
3522void valleyview_set_rps(struct drm_device *dev, u8 val)
3523{
3524 struct drm_i915_private *dev_priv = dev->dev_private;
7a67092a
VS
3525
3526 gen6_rps_limits(dev_priv, &val);
0a073b84
JB
3527
3528 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3529 WARN_ON(val > dev_priv->rps.max_delay);
3530 WARN_ON(val < dev_priv->rps.min_delay);
3531
80814ae4
VS
3532 vlv_update_rps_cur_delay(dev_priv);
3533
73008b98 3534 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
0a073b84
JB
3535 vlv_gpu_freq(dev_priv->mem_freq,
3536 dev_priv->rps.cur_delay),
73008b98
VS
3537 dev_priv->rps.cur_delay,
3538 vlv_gpu_freq(dev_priv->mem_freq, val), val);
0a073b84
JB
3539
3540 if (val == dev_priv->rps.cur_delay)
3541 return;
3542
ae99258f 3543 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
0a073b84 3544
80814ae4 3545 dev_priv->rps.cur_delay = val;
0a073b84
JB
3546
3547 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3548}
3549
44fc7d5c 3550static void gen6_disable_rps_interrupts(struct drm_device *dev)
2b4e57bd
ED
3551{
3552 struct drm_i915_private *dev_priv = dev->dev_private;
3553
2b4e57bd 3554 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4848405c 3555 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
2b4e57bd
ED
3556 /* Completely masking the PM interrupts here doesn't race with the rps work
3557 * item unmasking them again, because the work item uses a different
3558 * register (PMIMR) to mask PM interrupts. The only risk is leaving
3559 * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
3560
59cdb63d 3561 spin_lock_irq(&dev_priv->irq_lock);
c6a828d3 3562 dev_priv->rps.pm_iir = 0;
59cdb63d 3563 spin_unlock_irq(&dev_priv->irq_lock);
2b4e57bd 3564
4848405c 3565 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
2b4e57bd
ED
3566}
3567
44fc7d5c 3568static void gen6_disable_rps(struct drm_device *dev)
d20d4f0c
JB
3569{
3570 struct drm_i915_private *dev_priv = dev->dev_private;
3571
3572 I915_WRITE(GEN6_RC_CONTROL, 0);
44fc7d5c 3573 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
d20d4f0c 3574
44fc7d5c
DV
3575 gen6_disable_rps_interrupts(dev);
3576}
3577
3578static void valleyview_disable_rps(struct drm_device *dev)
3579{
3580 struct drm_i915_private *dev_priv = dev->dev_private;
3581
3582 I915_WRITE(GEN6_RC_CONTROL, 0);
d20d4f0c 3583
44fc7d5c 3584 gen6_disable_rps_interrupts(dev);
c9cddffc
JB
3585
3586 if (dev_priv->vlv_pctx) {
3587 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3588 dev_priv->vlv_pctx = NULL;
3589 }
d20d4f0c
JB
3590}
3591
2b4e57bd
ED
3592int intel_enable_rc6(const struct drm_device *dev)
3593{
eb4926e4
DL
3594 /* No RC6 before Ironlake */
3595 if (INTEL_INFO(dev)->gen < 5)
3596 return 0;
3597
456470eb 3598 /* Respect the kernel parameter if it is set */
2b4e57bd
ED
3599 if (i915_enable_rc6 >= 0)
3600 return i915_enable_rc6;
3601
6567d748
CW
3602 /* Disable RC6 on Ironlake */
3603 if (INTEL_INFO(dev)->gen == 5)
3604 return 0;
2b4e57bd 3605
456470eb
DV
3606 if (IS_HASWELL(dev)) {
3607 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
4a637c2c 3608 return INTEL_RC6_ENABLE;
456470eb 3609 }
2b4e57bd 3610
456470eb 3611 /* snb/ivb have more than one rc6 state. */
2b4e57bd
ED
3612 if (INTEL_INFO(dev)->gen == 6) {
3613 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3614 return INTEL_RC6_ENABLE;
3615 }
456470eb 3616
2b4e57bd
ED
3617 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3618 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3619}
3620
44fc7d5c
DV
3621static void gen6_enable_rps_interrupts(struct drm_device *dev)
3622{
3623 struct drm_i915_private *dev_priv = dev->dev_private;
a9c1f90c 3624 u32 enabled_intrs;
44fc7d5c
DV
3625
3626 spin_lock_irq(&dev_priv->irq_lock);
a0b3335a 3627 WARN_ON(dev_priv->rps.pm_iir);
edbfdb45 3628 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
44fc7d5c
DV
3629 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3630 spin_unlock_irq(&dev_priv->irq_lock);
a9c1f90c 3631
fd547d25 3632 /* Only unmask the PM interrupts we need; mask all others. */
a9c1f90c
MK
3633 enabled_intrs = GEN6_PM_RPS_EVENTS;
3634
3635 /* IVB and SNB hard hang on a looping batchbuffer
3636 * if GEN6_PM_UP_EI_EXPIRED is masked.
3637 */
3638 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3639 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3640
3641 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
44fc7d5c
DV
3642}
3643
79f5b2c7 3644static void gen6_enable_rps(struct drm_device *dev)
2b4e57bd 3645{
79f5b2c7 3646 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 3647 struct intel_ring_buffer *ring;
7b9e0ae6
CW
3648 u32 rp_state_cap;
3649 u32 gt_perf_status;
31643d54 3650 u32 rc6vids, pcu_mbox, rc6_mask = 0;
2b4e57bd 3651 u32 gtfifodbg;
2b4e57bd 3652 int rc6_mode;
42c0526c 3653 int i, ret;
2b4e57bd 3654
4fc688ce 3655 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 3656
2b4e57bd
ED
3657 /* Here begins a magic sequence of register writes to enable
3658 * auto-downclocking.
3659 *
3660 * Perhaps there might be some value in exposing these to
3661 * userspace...
3662 */
3663 I915_WRITE(GEN6_RC_STATE, 0);
2b4e57bd
ED
3664
3665 /* Clear the DBG now so we don't confuse earlier errors */
3666 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3667 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3668 I915_WRITE(GTFIFODBG, gtfifodbg);
3669 }
3670
3671 gen6_gt_force_wake_get(dev_priv);
3672
7b9e0ae6
CW
3673 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3674 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3675
31c77388
BW
3676 /* In units of 50MHz */
3677 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
dd75fdc8
CW
3678 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
3679 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
3680 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
3681 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
c6a828d3 3682 dev_priv->rps.cur_delay = 0;
7b9e0ae6 3683
2b4e57bd
ED
3684 /* disable the counters and set deterministic thresholds */
3685 I915_WRITE(GEN6_RC_CONTROL, 0);
3686
3687 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3688 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3689 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3690 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3691 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3692
b4519513
CW
3693 for_each_ring(ring, dev_priv, i)
3694 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2b4e57bd
ED
3695
3696 I915_WRITE(GEN6_RC_SLEEP, 0);
3697 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
351aa566
SM
3698 if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
3699 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3700 else
3701 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
0920a487 3702 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
2b4e57bd
ED
3703 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3704
5a7dc92a 3705 /* Check if we are enabling RC6 */
2b4e57bd
ED
3706 rc6_mode = intel_enable_rc6(dev_priv->dev);
3707 if (rc6_mode & INTEL_RC6_ENABLE)
3708 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3709
5a7dc92a
ED
3710 /* We don't use those on Haswell */
3711 if (!IS_HASWELL(dev)) {
3712 if (rc6_mode & INTEL_RC6p_ENABLE)
3713 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2b4e57bd 3714
5a7dc92a
ED
3715 if (rc6_mode & INTEL_RC6pp_ENABLE)
3716 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3717 }
2b4e57bd
ED
3718
3719 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
5a7dc92a
ED
3720 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3721 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3722 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
2b4e57bd
ED
3723
3724 I915_WRITE(GEN6_RC_CONTROL,
3725 rc6_mask |
3726 GEN6_RC_CTL_EI_MODE(1) |
3727 GEN6_RC_CTL_HW_ENABLE);
3728
dd75fdc8
CW
3729 /* Power down if completely idle for over 50ms */
3730 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
2b4e57bd 3731 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2b4e57bd 3732
42c0526c 3733 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
988b36e5 3734 if (!ret) {
42c0526c
BW
3735 pcu_mbox = 0;
3736 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
a2b3fc01 3737 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
10e08497 3738 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
a2b3fc01
BW
3739 (dev_priv->rps.max_delay & 0xff) * 50,
3740 (pcu_mbox & 0xff) * 50);
31c77388 3741 dev_priv->rps.hw_max = pcu_mbox & 0xff;
42c0526c
BW
3742 }
3743 } else {
3744 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
2b4e57bd
ED
3745 }
3746
dd75fdc8
CW
3747 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3748 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
2b4e57bd 3749
44fc7d5c 3750 gen6_enable_rps_interrupts(dev);
2b4e57bd 3751
31643d54
BW
3752 rc6vids = 0;
3753 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3754 if (IS_GEN6(dev) && ret) {
3755 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3756 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3757 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3758 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3759 rc6vids &= 0xffff00;
3760 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3761 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3762 if (ret)
3763 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3764 }
3765
2b4e57bd 3766 gen6_gt_force_wake_put(dev_priv);
2b4e57bd
ED
3767}
3768
c67a470b 3769void gen6_update_ring_freq(struct drm_device *dev)
2b4e57bd 3770{
79f5b2c7 3771 struct drm_i915_private *dev_priv = dev->dev_private;
2b4e57bd 3772 int min_freq = 15;
3ebecd07
CW
3773 unsigned int gpu_freq;
3774 unsigned int max_ia_freq, min_ring_freq;
2b4e57bd 3775 int scaling_factor = 180;
eda79642 3776 struct cpufreq_policy *policy;
2b4e57bd 3777
4fc688ce 3778 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
79f5b2c7 3779
eda79642
BW
3780 policy = cpufreq_cpu_get(0);
3781 if (policy) {
3782 max_ia_freq = policy->cpuinfo.max_freq;
3783 cpufreq_cpu_put(policy);
3784 } else {
3785 /*
3786 * Default to measured freq if none found, PCU will ensure we
3787 * don't go over
3788 */
2b4e57bd 3789 max_ia_freq = tsc_khz;
eda79642 3790 }
2b4e57bd
ED
3791
3792 /* Convert from kHz to MHz */
3793 max_ia_freq /= 1000;
3794
f6aca45c
BW
3795 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
3796 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3797 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3ebecd07 3798
2b4e57bd
ED
3799 /*
3800 * For each potential GPU frequency, load a ring frequency we'd like
3801 * to use for memory access. We do this by specifying the IA frequency
3802 * the PCU should use as a reference to determine the ring frequency.
3803 */
c6a828d3 3804 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
2b4e57bd 3805 gpu_freq--) {
c6a828d3 3806 int diff = dev_priv->rps.max_delay - gpu_freq;
3ebecd07
CW
3807 unsigned int ia_freq = 0, ring_freq = 0;
3808
3809 if (IS_HASWELL(dev)) {
f6aca45c 3810 ring_freq = mult_frac(gpu_freq, 5, 4);
3ebecd07
CW
3811 ring_freq = max(min_ring_freq, ring_freq);
3812 /* leave ia_freq as the default, chosen by cpufreq */
3813 } else {
3814 /* On older processors, there is no separate ring
3815 * clock domain, so in order to boost the bandwidth
3816 * of the ring, we need to upclock the CPU (ia_freq).
3817 *
3818 * For GPU frequencies less than 750MHz,
3819 * just use the lowest ring freq.
3820 */
3821 if (gpu_freq < min_freq)
3822 ia_freq = 800;
3823 else
3824 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3825 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3826 }
2b4e57bd 3827
42c0526c
BW
3828 sandybridge_pcode_write(dev_priv,
3829 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3ebecd07
CW
3830 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3831 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3832 gpu_freq);
2b4e57bd 3833 }
2b4e57bd
ED
3834}
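/*
 * Editorial sketch (not part of the driver), using hypothetical numbers:
 * on Haswell the ring frequency is scaled directly from the GPU frequency,
 * ring_freq = gpu_freq * 5 / 4, clamped to min_ring_freq.  On snb/ivb the
 * CPU is upclocked instead; with max_ia_freq = 3400 MHz, scaling_factor =
 * 180 and diff = 4 steps below the maximum:
 *
 *	ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz
 *	ia_freq = DIV_ROUND_CLOSEST(3040, 100) = 30   (100 MHz units for the PCU)
 */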
3835
0a073b84
JB
3836int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3837{
3838 u32 val, rp0;
3839
64936258 3840 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
0a073b84
JB
3841
3842 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3843 /* Clamp to max */
3844 rp0 = min_t(u32, rp0, 0xea);
3845
3846 return rp0;
3847}
3848
3849static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3850{
3851 u32 val, rpe;
3852
64936258 3853 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
0a073b84 3854 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
64936258 3855 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
0a073b84
JB
3856 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3857
3858 return rpe;
3859}
3860
3861int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3862{
64936258 3863 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
0a073b84
JB
3864}
3865
c9cddffc
JB
3866static void valleyview_setup_pctx(struct drm_device *dev)
3867{
3868 struct drm_i915_private *dev_priv = dev->dev_private;
3869 struct drm_i915_gem_object *pctx;
3870 unsigned long pctx_paddr;
3871 u32 pcbr;
3872 int pctx_size = 24*1024;
3873
3874 pcbr = I915_READ(VLV_PCBR);
3875 if (pcbr) {
3876 /* BIOS set it up already, grab the pre-alloc'd space */
3877 int pcbr_offset;
3878
3879 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3880 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3881 pcbr_offset,
190d6cd5 3882 I915_GTT_OFFSET_NONE,
c9cddffc
JB
3883 pctx_size);
3884 goto out;
3885 }
3886
3887 /*
3888 * From the Gunit register HAS:
3889 * The Gfx driver is expected to program this register and ensure
3890 * proper allocation within Gfx stolen memory. For example, this
3891 * register should be programmed such that the PCBR range does not
3892 * overlap with other ranges, such as the frame buffer, protected
3893 * memory, or any other relevant ranges.
3894 */
3895 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3896 if (!pctx) {
3897 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3898 return;
3899 }
3900
3901 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3902 I915_WRITE(VLV_PCBR, pctx_paddr);
3903
3904out:
3905 dev_priv->vlv_pctx = pctx;
3906}
3907
0a073b84
JB
3908static void valleyview_enable_rps(struct drm_device *dev)
3909{
3910 struct drm_i915_private *dev_priv = dev->dev_private;
3911 struct intel_ring_buffer *ring;
a2b23fe0 3912 u32 gtfifodbg, val, rc6_mode = 0;
0a073b84
JB
3913 int i;
3914
3915 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3916
3917 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
f7d85c1e
JB
3918 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3919 gtfifodbg);
0a073b84
JB
3920 I915_WRITE(GTFIFODBG, gtfifodbg);
3921 }
3922
c9cddffc
JB
3923 valleyview_setup_pctx(dev);
3924
0a073b84
JB
3925 gen6_gt_force_wake_get(dev_priv);
3926
3927 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3928 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3929 I915_WRITE(GEN6_RP_UP_EI, 66000);
3930 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3931
3932 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3933
3934 I915_WRITE(GEN6_RP_CONTROL,
3935 GEN6_RP_MEDIA_TURBO |
3936 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3937 GEN6_RP_MEDIA_IS_GFX |
3938 GEN6_RP_ENABLE |
3939 GEN6_RP_UP_BUSY_AVG |
3940 GEN6_RP_DOWN_IDLE_CONT);
3941
3942 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3943 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3944 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3945
3946 for_each_ring(ring, dev_priv, i)
3947 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3948
3949 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3950
3951 /* allows RC6 residency counter to work */
49798eb2
JB
3952 I915_WRITE(VLV_COUNTER_CONTROL,
3953 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3954 VLV_MEDIA_RC6_COUNT_EN |
3955 VLV_RENDER_RC6_COUNT_EN));
a2b23fe0
JB
3956 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3957 rc6_mode = GEN7_RC_CTL_TO_MODE;
3958 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
0a073b84 3959
64936258 3960 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
2445966e
JB
3961 switch ((val >> 6) & 3) {
3962 case 0:
3963 case 1:
3964 dev_priv->mem_freq = 800;
3965 break;
3966 case 2:
3967 dev_priv->mem_freq = 1066;
3968 break;
3969 case 3:
3970 dev_priv->mem_freq = 1333;
3971 break;
3972 }
0a073b84
JB
3973 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
3974
3975 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3976 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3977
0a073b84 3978 dev_priv->rps.cur_delay = (val >> 8) & 0xff;
73008b98
VS
3979 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3980 vlv_gpu_freq(dev_priv->mem_freq,
3981 dev_priv->rps.cur_delay),
3982 dev_priv->rps.cur_delay);
0a073b84
JB
3983
3984 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3985 dev_priv->rps.hw_max = dev_priv->rps.max_delay;
73008b98
VS
3986 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3987 vlv_gpu_freq(dev_priv->mem_freq,
3988 dev_priv->rps.max_delay),
3989 dev_priv->rps.max_delay);
0a073b84 3990
73008b98
VS
3991 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3992 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3993 vlv_gpu_freq(dev_priv->mem_freq,
3994 dev_priv->rps.rpe_delay),
3995 dev_priv->rps.rpe_delay);
0a073b84 3996
73008b98
VS
3997 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3998 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3999 vlv_gpu_freq(dev_priv->mem_freq,
4000 dev_priv->rps.min_delay),
4001 dev_priv->rps.min_delay);
0a073b84 4002
73008b98
VS
4003 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4004 vlv_gpu_freq(dev_priv->mem_freq,
4005 dev_priv->rps.rpe_delay),
4006 dev_priv->rps.rpe_delay);
0a073b84 4007
73008b98 4008 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
0a073b84 4009
44fc7d5c 4010 gen6_enable_rps_interrupts(dev);
0a073b84
JB
4011
4012 gen6_gt_force_wake_put(dev_priv);
4013}
4014
930ebb46 4015void ironlake_teardown_rc6(struct drm_device *dev)
2b4e57bd
ED
4016{
4017 struct drm_i915_private *dev_priv = dev->dev_private;
4018
3e373948
DV
4019 if (dev_priv->ips.renderctx) {
4020 i915_gem_object_unpin(dev_priv->ips.renderctx);
4021 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4022 dev_priv->ips.renderctx = NULL;
2b4e57bd
ED
4023 }
4024
3e373948
DV
4025 if (dev_priv->ips.pwrctx) {
4026 i915_gem_object_unpin(dev_priv->ips.pwrctx);
4027 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4028 dev_priv->ips.pwrctx = NULL;
2b4e57bd
ED
4029 }
4030}
4031
930ebb46 4032static void ironlake_disable_rc6(struct drm_device *dev)
2b4e57bd
ED
4033{
4034 struct drm_i915_private *dev_priv = dev->dev_private;
4035
4036 if (I915_READ(PWRCTXA)) {
4037 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4038 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4039 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4040 50);
4041
4042 I915_WRITE(PWRCTXA, 0);
4043 POSTING_READ(PWRCTXA);
4044
4045 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4046 POSTING_READ(RSTDBYCTL);
4047 }
2b4e57bd
ED
4048}
4049
4050static int ironlake_setup_rc6(struct drm_device *dev)
4051{
4052 struct drm_i915_private *dev_priv = dev->dev_private;
4053
3e373948
DV
4054 if (dev_priv->ips.renderctx == NULL)
4055 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4056 if (!dev_priv->ips.renderctx)
2b4e57bd
ED
4057 return -ENOMEM;
4058
3e373948
DV
4059 if (dev_priv->ips.pwrctx == NULL)
4060 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4061 if (!dev_priv->ips.pwrctx) {
2b4e57bd
ED
4062 ironlake_teardown_rc6(dev);
4063 return -ENOMEM;
4064 }
4065
4066 return 0;
4067}
4068
930ebb46 4069static void ironlake_enable_rc6(struct drm_device *dev)
2b4e57bd
ED
4070{
4071 struct drm_i915_private *dev_priv = dev->dev_private;
6d90c952 4072 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3e960501 4073 bool was_interruptible;
2b4e57bd
ED
4074 int ret;
4075
4076 /* rc6 disabled by default due to repeated reports of hanging during
4077 * boot and resume.
4078 */
4079 if (!intel_enable_rc6(dev))
4080 return;
4081
79f5b2c7
DV
4082 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4083
2b4e57bd 4084 ret = ironlake_setup_rc6(dev);
79f5b2c7 4085 if (ret)
2b4e57bd 4086 return;
2b4e57bd 4087
3e960501
CW
4088 was_interruptible = dev_priv->mm.interruptible;
4089 dev_priv->mm.interruptible = false;
4090
2b4e57bd
ED
4091 /*
4092 * GPU can automatically power down the render unit if given a page
4093 * to save state.
4094 */
6d90c952 4095 ret = intel_ring_begin(ring, 6);
2b4e57bd
ED
4096 if (ret) {
4097 ironlake_teardown_rc6(dev);
3e960501 4098 dev_priv->mm.interruptible = was_interruptible;
2b4e57bd
ED
4099 return;
4100 }
4101
6d90c952
DV
4102 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4103 intel_ring_emit(ring, MI_SET_CONTEXT);
f343c5f6 4104 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
6d90c952
DV
4105 MI_MM_SPACE_GTT |
4106 MI_SAVE_EXT_STATE_EN |
4107 MI_RESTORE_EXT_STATE_EN |
4108 MI_RESTORE_INHIBIT);
4109 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4110 intel_ring_emit(ring, MI_NOOP);
4111 intel_ring_emit(ring, MI_FLUSH);
4112 intel_ring_advance(ring);
2b4e57bd
ED
4113
4114 /*
4115 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
4116 * does an implicit flush, combined with MI_FLUSH above, it should be
4117 * safe to assume that renderctx is valid
4118 */
3e960501
CW
4119 ret = intel_ring_idle(ring);
4120 dev_priv->mm.interruptible = was_interruptible;
2b4e57bd 4121 if (ret) {
def27a58 4122 DRM_ERROR("failed to enable ironlake power savings\n");
2b4e57bd 4123 ironlake_teardown_rc6(dev);
2b4e57bd
ED
4124 return;
4125 }
4126
f343c5f6 4127 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
2b4e57bd 4128 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2b4e57bd
ED
4129}
4130
dde18883
ED
4131static unsigned long intel_pxfreq(u32 vidfreq)
4132{
4133 unsigned long freq;
4134 int div = (vidfreq & 0x3f0000) >> 16;
4135 int post = (vidfreq & 0x3000) >> 12;
4136 int pre = (vidfreq & 0x7);
4137
4138 if (!pre)
4139 return 0;
4140
4141 freq = ((div * 133333) / ((1<<post) * pre));
4142
4143 return freq;
4144}
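/*
 * Editorial example (not part of the driver), with hypothetical field
 * values: intel_pxfreq() decodes a PXVFREQ register as
 * freq = (div * 133333) / ((1 << post) * pre).  For div = 6, post = 1 and
 * pre = 2 this yields (6 * 133333) / (2 * 2) = 199999, in the same units
 * as the 133333 base value.
 */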
4145
eb48eb00
DV
4146static const struct cparams {
4147 u16 i;
4148 u16 t;
4149 u16 m;
4150 u16 c;
4151} cparams[] = {
4152 { 1, 1333, 301, 28664 },
4153 { 1, 1066, 294, 24460 },
4154 { 1, 800, 294, 25192 },
4155 { 0, 1333, 276, 27605 },
4156 { 0, 1066, 276, 27605 },
4157 { 0, 800, 231, 23784 },
4158};
4159
f531dcb2 4160static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
4161{
4162 u64 total_count, diff, ret;
4163 u32 count1, count2, count3, m = 0, c = 0;
4164 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4165 int i;
4166
02d71956
DV
4167 assert_spin_locked(&mchdev_lock);
4168
20e4d407 4169 diff1 = now - dev_priv->ips.last_time1;
eb48eb00
DV
4170
4171 /* Prevent division-by-zero if we are asking too fast.
4172 * Also, we don't get interesting results if we are polling
4173 * faster than once in 10ms, so just return the saved value
4174 * in such cases.
4175 */
4176 if (diff1 <= 10)
20e4d407 4177 return dev_priv->ips.chipset_power;
eb48eb00
DV
4178
4179 count1 = I915_READ(DMIEC);
4180 count2 = I915_READ(DDREC);
4181 count3 = I915_READ(CSIEC);
4182
4183 total_count = count1 + count2 + count3;
4184
4185 /* FIXME: handle per-counter overflow */
20e4d407
DV
4186 if (total_count < dev_priv->ips.last_count1) {
4187 diff = ~0UL - dev_priv->ips.last_count1;
eb48eb00
DV
4188 diff += total_count;
4189 } else {
20e4d407 4190 diff = total_count - dev_priv->ips.last_count1;
eb48eb00
DV
4191 }
4192
4193 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
20e4d407
DV
4194 if (cparams[i].i == dev_priv->ips.c_m &&
4195 cparams[i].t == dev_priv->ips.r_t) {
eb48eb00
DV
4196 m = cparams[i].m;
4197 c = cparams[i].c;
4198 break;
4199 }
4200 }
4201
4202 diff = div_u64(diff, diff1);
4203 ret = ((m * diff) + c);
4204 ret = div_u64(ret, 10);
4205
20e4d407
DV
4206 dev_priv->ips.last_count1 = total_count;
4207 dev_priv->ips.last_time1 = now;
eb48eb00 4208
20e4d407 4209 dev_priv->ips.chipset_power = ret;
eb48eb00
DV
4210
4211 return ret;
4212}
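/*
 * Editorial note (not part of the driver): the value computed above is a
 * rate scaled by the per-platform cparams coefficients,
 *
 *	ret = (m * (delta_count / delta_t_ms) + c) / 10
 *
 * where delta_count is the combined DMIEC + DDREC + CSIEC delta and
 * delta_t_ms the milliseconds elapsed since the previous sample.
 */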
4213
f531dcb2
CW
4214unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4215{
4216 unsigned long val;
4217
4218 if (dev_priv->info->gen != 5)
4219 return 0;
4220
4221 spin_lock_irq(&mchdev_lock);
4222
4223 val = __i915_chipset_val(dev_priv);
4224
4225 spin_unlock_irq(&mchdev_lock);
4226
4227 return val;
4228}
4229
eb48eb00
DV
4230unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4231{
4232 unsigned long m, x, b;
4233 u32 tsfs;
4234
4235 tsfs = I915_READ(TSFS);
4236
4237 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4238 x = I915_READ8(TR1);
4239
4240 b = tsfs & TSFS_INTR_MASK;
4241
4242 return ((m * x) / 127) - b;
4243}
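/*
 * Editorial example (not part of the driver), with hypothetical register
 * contents: i915_mch_val() evaluates (m * x) / 127 - b, where m is the
 * TSFS slope field, x the TR1 reading and b the TSFS intercept field.
 * For m = 127, x = 80 and b = 10 it returns (127 * 80) / 127 - 10 = 70.
 */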
4244
4245static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4246{
4247 static const struct v_table {
4248 u16 vd; /* in .1 mil */
4249 u16 vm; /* in .1 mil */
4250 } v_table[] = {
4251 { 0, 0, },
4252 { 375, 0, },
4253 { 500, 0, },
4254 { 625, 0, },
4255 { 750, 0, },
4256 { 875, 0, },
4257 { 1000, 0, },
4258 { 1125, 0, },
4259 { 4125, 3000, },
4260 { 4125, 3000, },
4261 { 4125, 3000, },
4262 { 4125, 3000, },
4263 { 4125, 3000, },
4264 { 4125, 3000, },
4265 { 4125, 3000, },
4266 { 4125, 3000, },
4267 { 4125, 3000, },
4268 { 4125, 3000, },
4269 { 4125, 3000, },
4270 { 4125, 3000, },
4271 { 4125, 3000, },
4272 { 4125, 3000, },
4273 { 4125, 3000, },
4274 { 4125, 3000, },
4275 { 4125, 3000, },
4276 { 4125, 3000, },
4277 { 4125, 3000, },
4278 { 4125, 3000, },
4279 { 4125, 3000, },
4280 { 4125, 3000, },
4281 { 4125, 3000, },
4282 { 4125, 3000, },
4283 { 4250, 3125, },
4284 { 4375, 3250, },
4285 { 4500, 3375, },
4286 { 4625, 3500, },
4287 { 4750, 3625, },
4288 { 4875, 3750, },
4289 { 5000, 3875, },
4290 { 5125, 4000, },
4291 { 5250, 4125, },
4292 { 5375, 4250, },
4293 { 5500, 4375, },
4294 { 5625, 4500, },
4295 { 5750, 4625, },
4296 { 5875, 4750, },
4297 { 6000, 4875, },
4298 { 6125, 5000, },
4299 { 6250, 5125, },
4300 { 6375, 5250, },
4301 { 6500, 5375, },
4302 { 6625, 5500, },
4303 { 6750, 5625, },
4304 { 6875, 5750, },
4305 { 7000, 5875, },
4306 { 7125, 6000, },
4307 { 7250, 6125, },
4308 { 7375, 6250, },
4309 { 7500, 6375, },
4310 { 7625, 6500, },
4311 { 7750, 6625, },
4312 { 7875, 6750, },
4313 { 8000, 6875, },
4314 { 8125, 7000, },
4315 { 8250, 7125, },
4316 { 8375, 7250, },
4317 { 8500, 7375, },
4318 { 8625, 7500, },
4319 { 8750, 7625, },
4320 { 8875, 7750, },
4321 { 9000, 7875, },
4322 { 9125, 8000, },
4323 { 9250, 8125, },
4324 { 9375, 8250, },
4325 { 9500, 8375, },
4326 { 9625, 8500, },
4327 { 9750, 8625, },
4328 { 9875, 8750, },
4329 { 10000, 8875, },
4330 { 10125, 9000, },
4331 { 10250, 9125, },
4332 { 10375, 9250, },
4333 { 10500, 9375, },
4334 { 10625, 9500, },
4335 { 10750, 9625, },
4336 { 10875, 9750, },
4337 { 11000, 9875, },
4338 { 11125, 10000, },
4339 { 11250, 10125, },
4340 { 11375, 10250, },
4341 { 11500, 10375, },
4342 { 11625, 10500, },
4343 { 11750, 10625, },
4344 { 11875, 10750, },
4345 { 12000, 10875, },
4346 { 12125, 11000, },
4347 { 12250, 11125, },
4348 { 12375, 11250, },
4349 { 12500, 11375, },
4350 { 12625, 11500, },
4351 { 12750, 11625, },
4352 { 12875, 11750, },
4353 { 13000, 11875, },
4354 { 13125, 12000, },
4355 { 13250, 12125, },
4356 { 13375, 12250, },
4357 { 13500, 12375, },
4358 { 13625, 12500, },
4359 { 13750, 12625, },
4360 { 13875, 12750, },
4361 { 14000, 12875, },
4362 { 14125, 13000, },
4363 { 14250, 13125, },
4364 { 14375, 13250, },
4365 { 14500, 13375, },
4366 { 14625, 13500, },
4367 { 14750, 13625, },
4368 { 14875, 13750, },
4369 { 15000, 13875, },
4370 { 15125, 14000, },
4371 { 15250, 14125, },
4372 { 15375, 14250, },
4373 { 15500, 14375, },
4374 { 15625, 14500, },
4375 { 15750, 14625, },
4376 { 15875, 14750, },
4377 { 16000, 14875, },
4378 { 16125, 15000, },
4379 };
4380 if (dev_priv->info->is_mobile)
4381 return v_table[pxvid].vm;
4382 else
4383 return v_table[pxvid].vd;
4384}
4385
02d71956 4386static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
4387{
4388 struct timespec now, diff1;
4389 u64 diff;
4390 unsigned long diffms;
4391 u32 count;
4392
02d71956 4393 assert_spin_locked(&mchdev_lock);
eb48eb00
DV
4394
4395 getrawmonotonic(&now);
20e4d407 4396 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
eb48eb00
DV
4397
4398 /* Don't divide by 0 */
4399 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4400 if (!diffms)
4401 return;
4402
4403 count = I915_READ(GFXEC);
4404
20e4d407
DV
4405 if (count < dev_priv->ips.last_count2) {
4406 diff = ~0UL - dev_priv->ips.last_count2;
eb48eb00
DV
4407 diff += count;
4408 } else {
20e4d407 4409 diff = count - dev_priv->ips.last_count2;
eb48eb00
DV
4410 }
4411
20e4d407
DV
4412 dev_priv->ips.last_count2 = count;
4413 dev_priv->ips.last_time2 = now;
eb48eb00
DV
4414
4415 /* More magic constants... */
4416 diff = diff * 1181;
4417 diff = div_u64(diff, diffms * 10);
20e4d407 4418 dev_priv->ips.gfx_power = diff;
eb48eb00
DV
4419}
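/*
 * Editorial note (not part of the driver): the running gfx_power value is
 * maintained above as
 *
 *	gfx_power = (delta_GFXEC * 1181) / (delta_t_ms * 10)
 *
 * i.e. the energy-counter delta scaled by the magic 1181 constant and
 * normalised over the elapsed milliseconds.
 */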
4420
02d71956
DV
4421void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4422{
4423 if (dev_priv->info->gen != 5)
4424 return;
4425
9270388e 4426 spin_lock_irq(&mchdev_lock);
02d71956
DV
4427
4428 __i915_update_gfx_val(dev_priv);
4429
9270388e 4430 spin_unlock_irq(&mchdev_lock);
02d71956
DV
4431}
4432
f531dcb2 4433static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
eb48eb00
DV
4434{
4435 unsigned long t, corr, state1, corr2, state2;
4436 u32 pxvid, ext_v;
4437
02d71956
DV
4438 assert_spin_locked(&mchdev_lock);
4439
c6a828d3 4440 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
eb48eb00
DV
4441 pxvid = (pxvid >> 24) & 0x7f;
4442 ext_v = pvid_to_extvid(dev_priv, pxvid);
4443
4444 state1 = ext_v;
4445
4446 t = i915_mch_val(dev_priv);
4447
4448 /* Revel in the empirically derived constants */
4449
4450 /* Correction factor in 1/100000 units */
4451 if (t > 80)
4452 corr = ((t * 2349) + 135940);
4453 else if (t >= 50)
4454 corr = ((t * 964) + 29317);
4455 else /* < 50 */
4456 corr = ((t * 301) + 1004);
4457
4458 corr = corr * ((150142 * state1) / 10000 - 78642);
4459 corr /= 100000;
20e4d407 4460 corr2 = (corr * dev_priv->ips.corr);
eb48eb00
DV
4461
4462 state2 = (corr2 * state1) / 10000;
4463 state2 /= 100; /* convert to mW */
4464
02d71956 4465 __i915_update_gfx_val(dev_priv);
eb48eb00 4466
20e4d407 4467 return dev_priv->ips.gfx_power + state2;
eb48eb00
DV
4468}
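/*
 * Editorial note (not part of the driver): the correction above is applied
 * in two stages before the conversion to milliwatts,
 *
 *	corr2  = (corr(t) * ((150142 * state1) / 10000 - 78642) / 100000) * ips.corr
 *	state2 = (corr2 * state1) / 10000 / 100
 *
 * with corr(t) taken from the three temperature bands (t > 80, 50..80,
 * below 50) and state1 the voltage looked up via pvid_to_extvid().
 */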
4469
f531dcb2
CW
4470unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4471{
4472 unsigned long val;
4473
4474 if (dev_priv->info->gen != 5)
4475 return 0;
4476
4477 spin_lock_irq(&mchdev_lock);
4478
4479 val = __i915_gfx_val(dev_priv);
4480
4481 spin_unlock_irq(&mchdev_lock);
4482
4483 return val;
4484}
4485
eb48eb00
DV
4486/**
4487 * i915_read_mch_val - return value for IPS use
4488 *
4489 * Calculate and return a value for the IPS driver to use when deciding whether
4490 * we have thermal and power headroom to increase CPU or GPU power budget.
4491 */
4492unsigned long i915_read_mch_val(void)
4493{
4494 struct drm_i915_private *dev_priv;
4495 unsigned long chipset_val, graphics_val, ret = 0;
4496
9270388e 4497 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
4498 if (!i915_mch_dev)
4499 goto out_unlock;
4500 dev_priv = i915_mch_dev;
4501
f531dcb2
CW
4502 chipset_val = __i915_chipset_val(dev_priv);
4503 graphics_val = __i915_gfx_val(dev_priv);
eb48eb00
DV
4504
4505 ret = chipset_val + graphics_val;
4506
4507out_unlock:
9270388e 4508 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
4509
4510 return ret;
4511}
4512EXPORT_SYMBOL_GPL(i915_read_mch_val);
4513
4514/**
4515 * i915_gpu_raise - raise GPU frequency limit
4516 *
4517 * Raise the limit; IPS indicates we have thermal headroom.
4518 */
4519bool i915_gpu_raise(void)
4520{
4521 struct drm_i915_private *dev_priv;
4522 bool ret = true;
4523
9270388e 4524 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
4525 if (!i915_mch_dev) {
4526 ret = false;
4527 goto out_unlock;
4528 }
4529 dev_priv = i915_mch_dev;
4530
20e4d407
DV
4531 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4532 dev_priv->ips.max_delay--;
eb48eb00
DV
4533
4534out_unlock:
9270388e 4535 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
4536
4537 return ret;
4538}
4539EXPORT_SYMBOL_GPL(i915_gpu_raise);
4540
4541/**
4542 * i915_gpu_lower - lower GPU frequency limit
4543 *
4544 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4545 * frequency maximum.
4546 */
4547bool i915_gpu_lower(void)
4548{
4549 struct drm_i915_private *dev_priv;
4550 bool ret = true;
4551
9270388e 4552 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
4553 if (!i915_mch_dev) {
4554 ret = false;
4555 goto out_unlock;
4556 }
4557 dev_priv = i915_mch_dev;
4558
20e4d407
DV
4559 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4560 dev_priv->ips.max_delay++;
eb48eb00
DV
4561
4562out_unlock:
9270388e 4563 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
4564
4565 return ret;
4566}
4567EXPORT_SYMBOL_GPL(i915_gpu_lower);
4568
4569/**
4570 * i915_gpu_busy - indicate GPU busyness to IPS
4571 *
4572 * Tell the IPS driver whether or not the GPU is busy.
4573 */
4574bool i915_gpu_busy(void)
4575{
4576 struct drm_i915_private *dev_priv;
f047e395 4577 struct intel_ring_buffer *ring;
eb48eb00 4578 bool ret = false;
f047e395 4579 int i;
eb48eb00 4580
9270388e 4581 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
4582 if (!i915_mch_dev)
4583 goto out_unlock;
4584 dev_priv = i915_mch_dev;
4585
f047e395
CW
4586 for_each_ring(ring, dev_priv, i)
4587 ret |= !list_empty(&ring->request_list);
eb48eb00
DV
4588
4589out_unlock:
9270388e 4590 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
4591
4592 return ret;
4593}
4594EXPORT_SYMBOL_GPL(i915_gpu_busy);
4595
4596/**
4597 * i915_gpu_turbo_disable - disable graphics turbo
4598 *
4599 * Disable graphics turbo by resetting the max frequency and setting the
4600 * current frequency to the default.
4601 */
4602bool i915_gpu_turbo_disable(void)
4603{
4604 struct drm_i915_private *dev_priv;
4605 bool ret = true;
4606
9270388e 4607 spin_lock_irq(&mchdev_lock);
eb48eb00
DV
4608 if (!i915_mch_dev) {
4609 ret = false;
4610 goto out_unlock;
4611 }
4612 dev_priv = i915_mch_dev;
4613
20e4d407 4614 dev_priv->ips.max_delay = dev_priv->ips.fstart;
eb48eb00 4615
20e4d407 4616 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
eb48eb00
DV
4617 ret = false;
4618
4619out_unlock:
9270388e 4620 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
4621
4622 return ret;
4623}
4624EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4625
4626/**
4627 * Tells the intel_ips driver that the i915 driver is now loaded, if
4628 * IPS got loaded first.
4629 *
4630 * This awkward dance is so that neither module has to depend on the
4631 * other in order for IPS to do the appropriate communication of
4632 * GPU turbo limits to i915.
4633 */
4634static void
4635ips_ping_for_i915_load(void)
4636{
4637 void (*link)(void);
4638
4639 link = symbol_get(ips_link_to_i915_driver);
4640 if (link) {
4641 link();
4642 symbol_put(ips_link_to_i915_driver);
4643 }
4644}
4645
4646void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4647{
02d71956
DV
4648 /* We only register the i915 ips part with intel-ips once everything is
4649 * set up, to avoid intel-ips sneaking in and reading bogus values. */
9270388e 4650 spin_lock_irq(&mchdev_lock);
eb48eb00 4651 i915_mch_dev = dev_priv;
9270388e 4652 spin_unlock_irq(&mchdev_lock);
eb48eb00
DV
4653
4654 ips_ping_for_i915_load();
4655}
4656
4657void intel_gpu_ips_teardown(void)
4658{
9270388e 4659 spin_lock_irq(&mchdev_lock);
eb48eb00 4660 i915_mch_dev = NULL;
9270388e 4661 spin_unlock_irq(&mchdev_lock);
eb48eb00 4662}
8090c6b9 4663static void intel_init_emon(struct drm_device *dev)
dde18883
ED
4664{
4665 struct drm_i915_private *dev_priv = dev->dev_private;
4666 u32 lcfuse;
4667 u8 pxw[16];
4668 int i;
4669
4670 /* Disable to program */
4671 I915_WRITE(ECR, 0);
4672 POSTING_READ(ECR);
4673
4674 /* Program energy weights for various events */
4675 I915_WRITE(SDEW, 0x15040d00);
4676 I915_WRITE(CSIEW0, 0x007f0000);
4677 I915_WRITE(CSIEW1, 0x1e220004);
4678 I915_WRITE(CSIEW2, 0x04000004);
4679
4680 for (i = 0; i < 5; i++)
4681 I915_WRITE(PEW + (i * 4), 0);
4682 for (i = 0; i < 3; i++)
4683 I915_WRITE(DEW + (i * 4), 0);
4684
4685 /* Program P-state weights to account for frequency power adjustment */
4686 for (i = 0; i < 16; i++) {
4687 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4688 unsigned long freq = intel_pxfreq(pxvidfreq);
4689 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4690 PXVFREQ_PX_SHIFT;
4691 unsigned long val;
4692
4693 val = vid * vid;
4694 val *= (freq / 1000);
4695 val *= 255;
4696 val /= (127*127*900);
4697 if (val > 0xff)
4698 DRM_ERROR("bad pxval: %ld\n", val);
4699 pxw[i] = val;
4700 }
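/*
 * Editorial example (not part of the driver), with hypothetical numbers:
 * each P-state weight computed above is
 * vid^2 * (freq / 1000) * 255 / (127 * 127 * 900).  For vid = 70 and
 * freq = 400000 this gives 70 * 70 * 400 * 255 / 14516100 ~= 34,
 * comfortably inside the 8-bit range checked for above.
 */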
4701 /* Render standby states get 0 weight */
4702 pxw[14] = 0;
4703 pxw[15] = 0;
4704
4705 for (i = 0; i < 4; i++) {
4706 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4707 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4708 I915_WRITE(PXW + (i * 4), val);
4709 }
4710
4711 /* Adjust magic regs to magic values (more experimental results) */
4712 I915_WRITE(OGW0, 0);
4713 I915_WRITE(OGW1, 0);
4714 I915_WRITE(EG0, 0x00007f00);
4715 I915_WRITE(EG1, 0x0000000e);
4716 I915_WRITE(EG2, 0x000e0000);
4717 I915_WRITE(EG3, 0x68000300);
4718 I915_WRITE(EG4, 0x42000000);
4719 I915_WRITE(EG5, 0x00140031);
4720 I915_WRITE(EG6, 0);
4721 I915_WRITE(EG7, 0);
4722
4723 for (i = 0; i < 8; i++)
4724 I915_WRITE(PXWL + (i * 4), 0);
4725
4726 /* Enable PMON + select events */
4727 I915_WRITE(ECR, 0x80000019);
4728
4729 lcfuse = I915_READ(LCFUSE02);
4730
20e4d407 4731 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
dde18883
ED
4732}
4733
8090c6b9
DV
4734void intel_disable_gt_powersave(struct drm_device *dev)
4735{
1a01ab3b
JB
4736 struct drm_i915_private *dev_priv = dev->dev_private;
4737
fd0c0642
DV
4738 /* Interrupts should be disabled already to avoid re-arming. */
4739 WARN_ON(dev->irq_enabled);
4740
930ebb46 4741 if (IS_IRONLAKE_M(dev)) {
8090c6b9 4742 ironlake_disable_drps(dev);
930ebb46 4743 ironlake_disable_rc6(dev);
0a073b84 4744 } else if (INTEL_INFO(dev)->gen >= 6) {
1a01ab3b 4745 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
250848ca 4746 cancel_work_sync(&dev_priv->rps.work);
4fc688ce 4747 mutex_lock(&dev_priv->rps.hw_lock);
d20d4f0c
JB
4748 if (IS_VALLEYVIEW(dev))
4749 valleyview_disable_rps(dev);
4750 else
4751 gen6_disable_rps(dev);
c0951f0c 4752 dev_priv->rps.enabled = false;
4fc688ce 4753 mutex_unlock(&dev_priv->rps.hw_lock);
930ebb46 4754 }
8090c6b9
DV
4755}
4756
1a01ab3b
JB
4757static void intel_gen6_powersave_work(struct work_struct *work)
4758{
4759 struct drm_i915_private *dev_priv =
4760 container_of(work, struct drm_i915_private,
4761 rps.delayed_resume_work.work);
4762 struct drm_device *dev = dev_priv->dev;
4763
4fc688ce 4764 mutex_lock(&dev_priv->rps.hw_lock);
0a073b84
JB
4765
4766 if (IS_VALLEYVIEW(dev)) {
4767 valleyview_enable_rps(dev);
4768 } else {
4769 gen6_enable_rps(dev);
4770 gen6_update_ring_freq(dev);
4771 }
c0951f0c 4772 dev_priv->rps.enabled = true;
4fc688ce 4773 mutex_unlock(&dev_priv->rps.hw_lock);
1a01ab3b
JB
4774}
4775
8090c6b9
DV
4776void intel_enable_gt_powersave(struct drm_device *dev)
4777{
1a01ab3b
JB
4778 struct drm_i915_private *dev_priv = dev->dev_private;
4779
8090c6b9
DV
4780 if (IS_IRONLAKE_M(dev)) {
4781 ironlake_enable_drps(dev);
4782 ironlake_enable_rc6(dev);
4783 intel_init_emon(dev);
0a073b84 4784 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
1a01ab3b
JB
4785 /*
4786 * PCU communication is slow and this doesn't need to be
4787 * done at any specific time, so do this out of our fast path
4788 * to make resume and init faster.
4789 */
4790 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4791 round_jiffies_up_relative(HZ));
8090c6b9
DV
4792 }
4793}
4794
3107bd48
DV
4795static void ibx_init_clock_gating(struct drm_device *dev)
4796{
4797 struct drm_i915_private *dev_priv = dev->dev_private;
4798
4799 /*
4800 * On Ibex Peak and Cougar Point, we need to disable clock
4801 * gating for the panel power sequencer or it will fail to
4802 * start up when no ports are active.
4803 */
4804 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4805}
4806
0e088b8f
VS
4807static void g4x_disable_trickle_feed(struct drm_device *dev)
4808{
4809 struct drm_i915_private *dev_priv = dev->dev_private;
4810 int pipe;
4811
4812 for_each_pipe(pipe) {
4813 I915_WRITE(DSPCNTR(pipe),
4814 I915_READ(DSPCNTR(pipe)) |
4815 DISPPLANE_TRICKLE_FEED_DISABLE);
1dba99f4 4816 intel_flush_primary_plane(dev_priv, pipe);
0e088b8f
VS
4817 }
4818}
4819
1fa61106 4820static void ironlake_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
4821{
4822 struct drm_i915_private *dev_priv = dev->dev_private;
231e54f6 4823 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6f1d69b0 4824
f1e8fa56
DL
4825 /*
4826 * Required for FBC
4827 * WaFbcDisableDpfcClockGating:ilk
4828 */
4d47e4f5
DL
4829 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4830 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4831 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6f1d69b0
ED
4832
4833 I915_WRITE(PCH_3DCGDIS0,
4834 MARIUNIT_CLOCK_GATE_DISABLE |
4835 SVSMUNIT_CLOCK_GATE_DISABLE);
4836 I915_WRITE(PCH_3DCGDIS1,
4837 VFMUNIT_CLOCK_GATE_DISABLE);
4838
6f1d69b0
ED
4839 /*
4840 * According to the spec the following bits should be set in
4841 * order to enable memory self-refresh
4842 * The bit 22/21 of 0x42004
4843 * The bit 5 of 0x42020
4844 * The bit 15 of 0x45000
4845 */
4846 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4847 (I915_READ(ILK_DISPLAY_CHICKEN2) |
4848 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4d47e4f5 4849 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6f1d69b0
ED
4850 I915_WRITE(DISP_ARB_CTL,
4851 (I915_READ(DISP_ARB_CTL) |
4852 DISP_FBC_WM_DIS));
4853 I915_WRITE(WM3_LP_ILK, 0);
4854 I915_WRITE(WM2_LP_ILK, 0);
4855 I915_WRITE(WM1_LP_ILK, 0);
4856
4857 /*
4858 * Based on the document from hardware guys the following bits
4859 * should be set unconditionally in order to enable FBC.
4860 * The bit 22 of 0x42000
4861 * The bit 22 of 0x42004
4862 * The bit 7,8,9 of 0x42020.
4863 */
4864 if (IS_IRONLAKE_M(dev)) {
4bb35334 4865 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6f1d69b0
ED
4866 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4867 I915_READ(ILK_DISPLAY_CHICKEN1) |
4868 ILK_FBCQ_DIS);
4869 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4870 I915_READ(ILK_DISPLAY_CHICKEN2) |
4871 ILK_DPARB_GATE);
6f1d69b0
ED
4872 }
4873
4d47e4f5
DL
4874 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4875
6f1d69b0
ED
4876 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4877 I915_READ(ILK_DISPLAY_CHICKEN2) |
4878 ILK_ELPIN_409_SELECT);
4879 I915_WRITE(_3D_CHICKEN2,
4880 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4881 _3D_CHICKEN2_WM_READ_PIPELINED);
4358a374 4882
ecdb4eb7 4883 /* WaDisableRenderCachePipelinedFlush:ilk */
4358a374
DV
4884 I915_WRITE(CACHE_MODE_0,
4885 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3107bd48 4886
0e088b8f 4887 g4x_disable_trickle_feed(dev);
bdad2b2f 4888
3107bd48
DV
4889 ibx_init_clock_gating(dev);
4890}
4891
4892static void cpt_init_clock_gating(struct drm_device *dev)
4893{
4894 struct drm_i915_private *dev_priv = dev->dev_private;
4895 int pipe;
3f704fa2 4896 uint32_t val;
3107bd48
DV
4897
4898 /*
4899 * On Ibex Peak and Cougar Point, we need to disable clock
4900 * gating for the panel power sequencer or it will fail to
4901 * start up when no ports are active.
4902 */
4903 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4904 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4905 DPLS_EDP_PPS_FIX_DIS);
335c07b7
TI
4906 /* The below fixes a weird display corruption, a few pixels shifted
4907 * downward, seen only on the LVDS panels of some HP laptops with IVY.
4908 */
3f704fa2 4909 for_each_pipe(pipe) {
dc4bd2d1
PZ
4910 val = I915_READ(TRANS_CHICKEN2(pipe));
4911 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4912 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
41aa3448 4913 if (dev_priv->vbt.fdi_rx_polarity_inverted)
3f704fa2 4914 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
dc4bd2d1
PZ
4915 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4916 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4917 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
3f704fa2
PZ
4918 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4919 }
3107bd48
DV
4920 /* WADP0ClockGatingDisable */
4921 for_each_pipe(pipe) {
4922 I915_WRITE(TRANS_CHICKEN1(pipe),
4923 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4924 }
6f1d69b0
ED
4925}
4926
1d7aaa0c
DV
4927static void gen6_check_mch_setup(struct drm_device *dev)
4928{
4929 struct drm_i915_private *dev_priv = dev->dev_private;
4930 uint32_t tmp;
4931
4932 tmp = I915_READ(MCH_SSKPD);
4933 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4934 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4935 DRM_INFO("This can cause pipe underruns and display issues.\n");
4936 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4937 }
4938}
4939
1fa61106 4940static void gen6_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
4941{
4942 struct drm_i915_private *dev_priv = dev->dev_private;
231e54f6 4943 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6f1d69b0 4944
231e54f6 4945 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6f1d69b0
ED
4946
4947 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4948 I915_READ(ILK_DISPLAY_CHICKEN2) |
4949 ILK_ELPIN_409_SELECT);
4950
ecdb4eb7 4951 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4283908e
DV
4952 I915_WRITE(_3D_CHICKEN,
4953 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4954
ecdb4eb7 4955 /* WaSetupGtModeTdRowDispatch:snb */
6547fbdb
DV
4956 if (IS_SNB_GT1(dev))
4957 I915_WRITE(GEN6_GT_MODE,
4958 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4959
6f1d69b0
ED
4960 I915_WRITE(WM3_LP_ILK, 0);
4961 I915_WRITE(WM2_LP_ILK, 0);
4962 I915_WRITE(WM1_LP_ILK, 0);
4963
6f1d69b0 4964 I915_WRITE(CACHE_MODE_0,
50743298 4965 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6f1d69b0
ED
4966
4967 I915_WRITE(GEN6_UCGCTL1,
4968 I915_READ(GEN6_UCGCTL1) |
4969 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4970 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4971
4972 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4973 * gating disable must be set. Failure to set it results in
4974 * flickering pixels due to Z write ordering failures after
4975 * some amount of runtime in the Mesa "fire" demo, and Unigine
4976 * Sanctuary and Tropics, and apparently anything else with
4977 * alpha test or pixel discard.
4978 *
4979 * According to the spec, bit 11 (RCCUNIT) must also be set,
4980 * but we didn't debug actual testcases to find it out.
0f846f81 4981 *
ecdb4eb7
DL
4982 * Also apply WaDisableVDSUnitClockGating:snb and
4983 * WaDisableRCPBUnitClockGating:snb.
6f1d69b0
ED
4984 */
4985 I915_WRITE(GEN6_UCGCTL2,
0f846f81 4986 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
6f1d69b0
ED
4987 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4988 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4989
4990 /* Bspec says we need to always set all mask bits. */
26b6e44a
KG
4991 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
4992 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
6f1d69b0
ED
4993
4994 /*
4995 * According to the spec the following bits should be
4996 * set in order to enable memory self-refresh and fbc:
4997 * The bit21 and bit22 of 0x42000
4998 * The bit21 and bit22 of 0x42004
4999 * The bit5 and bit7 of 0x42020
5000 * The bit14 of 0x70180
5001 * The bit14 of 0x71180
4bb35334
DL
5002 *
5003 * WaFbcAsynchFlipDisableFbcQueue:snb
6f1d69b0
ED
5004 */
5005 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5006 I915_READ(ILK_DISPLAY_CHICKEN1) |
5007 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5008 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5009 I915_READ(ILK_DISPLAY_CHICKEN2) |
5010 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
231e54f6
DL
5011 I915_WRITE(ILK_DSPCLK_GATE_D,
5012 I915_READ(ILK_DSPCLK_GATE_D) |
5013 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5014 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6f1d69b0 5015
0e088b8f 5016 g4x_disable_trickle_feed(dev);
f8f2ac9a
BW
5017
5018 /* The default value should be 0x200 according to docs, but the two
5019 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
5020 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
5021 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
3107bd48
DV
5022
5023 cpt_init_clock_gating(dev);
1d7aaa0c
DV
5024
5025 gen6_check_mch_setup(dev);
6f1d69b0
ED
5026}
5027
5028static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5029{
5030 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5031
5032 reg &= ~GEN7_FF_SCHED_MASK;
5033 reg |= GEN7_FF_TS_SCHED_HW;
5034 reg |= GEN7_FF_VS_SCHED_HW;
5035 reg |= GEN7_FF_DS_SCHED_HW;
5036
41c0b3a8
BW
5037 if (IS_HASWELL(dev_priv->dev))
5038 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
5039
6f1d69b0
ED
5040 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5041}
5042
17a303ec
PZ
5043static void lpt_init_clock_gating(struct drm_device *dev)
5044{
5045 struct drm_i915_private *dev_priv = dev->dev_private;
5046
5047 /*
5048 * TODO: this bit should only be enabled when really needed, then
5049 * disabled when not needed anymore in order to save power.
5050 */
5051 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5052 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5053 I915_READ(SOUTH_DSPCLK_GATE_D) |
5054 PCH_LP_PARTITION_LEVEL_DISABLE);
0a790cdb
PZ
5055
5056 /* WADPOClockGatingDisable:hsw */
5057 I915_WRITE(_TRANSA_CHICKEN1,
5058 I915_READ(_TRANSA_CHICKEN1) |
5059 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
17a303ec
PZ
5060}
5061
7d708ee4
ID
5062static void lpt_suspend_hw(struct drm_device *dev)
5063{
5064 struct drm_i915_private *dev_priv = dev->dev_private;
5065
5066 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5067 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5068
5069 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5070 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5071 }
5072}
5073
cad2a2d7
ED
5074static void haswell_init_clock_gating(struct drm_device *dev)
5075{
5076 struct drm_i915_private *dev_priv = dev->dev_private;
cad2a2d7
ED
5077
5078 I915_WRITE(WM3_LP_ILK, 0);
5079 I915_WRITE(WM2_LP_ILK, 0);
5080 I915_WRITE(WM1_LP_ILK, 0);
5081
5082 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
ecdb4eb7 5083 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
cad2a2d7
ED
5084 */
5085 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5086
ecdb4eb7 5087 /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
cad2a2d7
ED
5088 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5089 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5090
ecdb4eb7 5091 /* WaApplyL3ControlAndL3ChickenMode:hsw */
cad2a2d7
ED
5092 I915_WRITE(GEN7_L3CNTLREG1,
5093 GEN7_WA_FOR_GEN7_L3_CONTROL);
5094 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5095 GEN7_WA_L3_CHICKEN_MODE);
5096
ecdb4eb7 5097 /* This is required by WaCatErrorRejectionIssue:hsw */
cad2a2d7
ED
5098 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5099 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5100 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5101
ecdb4eb7 5102 /* WaVSRefCountFullforceMissDisable:hsw */
cad2a2d7
ED
5103 gen7_setup_fixed_func_scheduler(dev_priv);
5104
ecdb4eb7 5105 /* WaDisable4x2SubspanOptimization:hsw */
cad2a2d7
ED
5106 I915_WRITE(CACHE_MODE_1,
5107 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
1544d9d5 5108
ecdb4eb7 5109 /* WaSwitchSolVfFArbitrationPriority:hsw */
e3dff585
BW
5110 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5111
90a88643
PZ
5112 /* WaRsPkgCStateDisplayPMReq:hsw */
5113 I915_WRITE(CHICKEN_PAR1_1,
5114 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
1544d9d5 5115
17a303ec 5116 lpt_init_clock_gating(dev);
cad2a2d7
ED
5117}
5118
1fa61106 5119static void ivybridge_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5120{
5121 struct drm_i915_private *dev_priv = dev->dev_private;
20848223 5122 uint32_t snpcr;
6f1d69b0 5123
6f1d69b0
ED
5124 I915_WRITE(WM3_LP_ILK, 0);
5125 I915_WRITE(WM2_LP_ILK, 0);
5126 I915_WRITE(WM1_LP_ILK, 0);
5127
231e54f6 5128 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
6f1d69b0 5129
ecdb4eb7 5130 /* WaDisableEarlyCull:ivb */
87f8020e
JB
5131 I915_WRITE(_3D_CHICKEN3,
5132 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5133
ecdb4eb7 5134 /* WaDisableBackToBackFlipFix:ivb */
6f1d69b0
ED
5135 I915_WRITE(IVB_CHICKEN3,
5136 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5137 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5138
ecdb4eb7 5139 /* WaDisablePSDDualDispatchEnable:ivb */
12f3382b
JB
5140 if (IS_IVB_GT1(dev))
5141 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5142 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5143 else
5144 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
5145 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5146
ecdb4eb7 5147 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
6f1d69b0
ED
5148 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5149 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5150
ecdb4eb7 5151 /* WaApplyL3ControlAndL3ChickenMode:ivb */
6f1d69b0
ED
5152 I915_WRITE(GEN7_L3CNTLREG1,
5153 GEN7_WA_FOR_GEN7_L3_CONTROL);
5154 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8ab43976
JB
5155 GEN7_WA_L3_CHICKEN_MODE);
5156 if (IS_IVB_GT1(dev))
5157 I915_WRITE(GEN7_ROW_CHICKEN2,
5158 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5159 else
5160 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5161 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5162
6f1d69b0 5163
ecdb4eb7 5164 /* WaForceL3Serialization:ivb */
61939d97
JB
5165 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5166 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5167
0f846f81
JB
5168 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5169 * gating disable must be set. Failure to set it results in
5170 * flickering pixels due to Z write ordering failures after
5171 * some amount of runtime in the Mesa "fire" demo, and Unigine
5172 * Sanctuary and Tropics, and apparently anything else with
5173 * alpha test or pixel discard.
5174 *
5175 * According to the spec, bit 11 (RCCUNIT) must also be set,
5176 * but we didn't debug actual testcases to find it out.
5177 *
5178 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
ecdb4eb7 5179 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
0f846f81
JB
5180 */
5181 I915_WRITE(GEN6_UCGCTL2,
5182 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
5183 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5184
ecdb4eb7 5185 /* This is required by WaCatErrorRejectionIssue:ivb */
6f1d69b0
ED
5186 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5187 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5188 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5189
0e088b8f 5190 g4x_disable_trickle_feed(dev);
6f1d69b0 5191
ecdb4eb7 5192 /* WaVSRefCountFullforceMissDisable:ivb */
6f1d69b0 5193 gen7_setup_fixed_func_scheduler(dev_priv);
97e1930f 5194
ecdb4eb7 5195 /* WaDisable4x2SubspanOptimization:ivb */
97e1930f
DV
5196 I915_WRITE(CACHE_MODE_1,
5197 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
20848223
BW
5198
5199 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5200 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5201 snpcr |= GEN6_MBC_SNPCR_MED;
5202 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3107bd48 5203
ab5c608b
BW
5204 if (!HAS_PCH_NOP(dev))
5205 cpt_init_clock_gating(dev);
1d7aaa0c
DV
5206
5207 gen6_check_mch_setup(dev);
6f1d69b0
ED
5208}
5209
1fa61106 5210static void valleyview_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5211{
5212 struct drm_i915_private *dev_priv = dev->dev_private;
6f1d69b0 5213
d7fe0cc0 5214 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
6f1d69b0 5215
ecdb4eb7 5216 /* WaDisableEarlyCull:vlv */
87f8020e
JB
5217 I915_WRITE(_3D_CHICKEN3,
5218 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5219
ecdb4eb7 5220 /* WaDisableBackToBackFlipFix:vlv */
6f1d69b0
ED
5221 I915_WRITE(IVB_CHICKEN3,
5222 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5223 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5224
ecdb4eb7 5225 /* WaDisablePSDDualDispatchEnable:vlv */
12f3382b 5226 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
d3bc0303
JB
5227 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5228 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
12f3382b 5229
ecdb4eb7 5230 /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
6f1d69b0
ED
5231 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5232 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5233
ecdb4eb7 5234 /* WaApplyL3ControlAndL3ChickenMode:vlv */
d0cf5ead 5235 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
6f1d69b0
ED
5236 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
5237
ecdb4eb7 5238 /* WaForceL3Serialization:vlv */
61939d97
JB
5239 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5240 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5241
ecdb4eb7 5242 /* WaDisableDopClockGating:vlv */
8ab43976
JB
5243 I915_WRITE(GEN7_ROW_CHICKEN2,
5244 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5245
ecdb4eb7 5246 /* This is required by WaCatErrorRejectionIssue:vlv */
6f1d69b0
ED
5247 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5248 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5249 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5250
0f846f81
JB
5251 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5252 * gating disable must be set. Failure to set it results in
5253 * flickering pixels due to Z write ordering failures after
5254 * some amount of runtime in the Mesa "fire" demo, and Unigine
5255 * Sanctuary and Tropics, and apparently anything else with
5256 * alpha test or pixel discard.
5257 *
5258 * According to the spec, bit 11 (RCCUNIT) must also be set,
5259 * but we didn't debug actual testcases to find it out.
5260 *
5261 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
ecdb4eb7 5262 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
0f846f81 5263 *
ecdb4eb7
DL
5264 * Also apply WaDisableVDSUnitClockGating:vlv and
5265 * WaDisableRCPBUnitClockGating:vlv.
0f846f81
JB
5266 */
5267 I915_WRITE(GEN6_UCGCTL2,
5268 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
6edaa7fc 5269 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
0f846f81
JB
5270 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
5271 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5272 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5273
e3f33d46
JB
5274 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5275
e0d8d59b 5276 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
6f1d69b0 5277
6b26c86d
DV
5278 I915_WRITE(CACHE_MODE_1,
5279 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7983117f 5280
2d809570 5281 /*
ecdb4eb7 5282 * WaDisableVLVClockGating_VBIIssue:vlv
2d809570
JB
 5283 * Disable clock gating on the GCFG unit to prevent a delay
5284 * in the reporting of vblank events.
5285 */
4e8c84a5
JB
5286 I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
5287
5288 /* Conservative clock gating settings for now */
5289 I915_WRITE(0x9400, 0xffffffff);
5290 I915_WRITE(0x9404, 0xffffffff);
5291 I915_WRITE(0x9408, 0xffffffff);
5292 I915_WRITE(0x940c, 0xffffffff);
5293 I915_WRITE(0x9410, 0xffffffff);
5294 I915_WRITE(0x9414, 0xffffffff);
5295 I915_WRITE(0x9418, 0xffffffff);
6f1d69b0
ED
5296}
5297
1fa61106 5298static void g4x_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5299{
5300 struct drm_i915_private *dev_priv = dev->dev_private;
5301 uint32_t dspclk_gate;
5302
5303 I915_WRITE(RENCLK_GATE_D1, 0);
5304 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5305 GS_UNIT_CLOCK_GATE_DISABLE |
5306 CL_UNIT_CLOCK_GATE_DISABLE);
5307 I915_WRITE(RAMCLK_GATE_D, 0);
5308 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5309 OVRUNIT_CLOCK_GATE_DISABLE |
5310 OVCUNIT_CLOCK_GATE_DISABLE;
5311 if (IS_GM45(dev))
5312 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5313 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
4358a374
DV
5314
5315 /* WaDisableRenderCachePipelinedFlush */
5316 I915_WRITE(CACHE_MODE_0,
5317 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
de1aa629 5318
0e088b8f 5319 g4x_disable_trickle_feed(dev);
6f1d69b0
ED
5320}
5321
1fa61106 5322static void crestline_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5323{
5324 struct drm_i915_private *dev_priv = dev->dev_private;
5325
5326 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5327 I915_WRITE(RENCLK_GATE_D2, 0);
5328 I915_WRITE(DSPCLK_GATE_D, 0);
5329 I915_WRITE(RAMCLK_GATE_D, 0);
5330 I915_WRITE16(DEUC, 0);
20f94967
VS
5331 I915_WRITE(MI_ARB_STATE,
5332 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6f1d69b0
ED
5333}
5334
1fa61106 5335static void broadwater_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5336{
5337 struct drm_i915_private *dev_priv = dev->dev_private;
5338
5339 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5340 I965_RCC_CLOCK_GATE_DISABLE |
5341 I965_RCPB_CLOCK_GATE_DISABLE |
5342 I965_ISC_CLOCK_GATE_DISABLE |
5343 I965_FBC_CLOCK_GATE_DISABLE);
5344 I915_WRITE(RENCLK_GATE_D2, 0);
20f94967
VS
5345 I915_WRITE(MI_ARB_STATE,
5346 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6f1d69b0
ED
5347}
5348
1fa61106 5349static void gen3_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5350{
5351 struct drm_i915_private *dev_priv = dev->dev_private;
5352 u32 dstate = I915_READ(D_STATE);
5353
5354 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5355 DSTATE_DOT_CLOCK_GATING;
5356 I915_WRITE(D_STATE, dstate);
13a86b85
CW
5357
5358 if (IS_PINEVIEW(dev))
5359 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
974a3b0f
DV
5360
5361 /* IIR "flip pending" means done if this bit is set */
5362 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
6f1d69b0
ED
5363}
5364
1fa61106 5365static void i85x_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5366{
5367 struct drm_i915_private *dev_priv = dev->dev_private;
5368
5369 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5370}
5371
1fa61106 5372static void i830_init_clock_gating(struct drm_device *dev)
6f1d69b0
ED
5373{
5374 struct drm_i915_private *dev_priv = dev->dev_private;
5375
5376 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5377}
5378
6f1d69b0
ED
5379void intel_init_clock_gating(struct drm_device *dev)
5380{
5381 struct drm_i915_private *dev_priv = dev->dev_private;
5382
5383 dev_priv->display.init_clock_gating(dev);
6f1d69b0
ED
5384}
5385
7d708ee4
ID
5386void intel_suspend_hw(struct drm_device *dev)
5387{
5388 if (HAS_PCH_LPT(dev))
5389 lpt_suspend_hw(dev);
5390}
5391
15d199ea
PZ
5392/**
5393 * We should only use the power well if we explicitly asked the hardware to
5394 * enable it, so check if it's enabled and also check if we've requested it to
5395 * be enabled.
5396 */
b97186f0
PZ
5397bool intel_display_power_enabled(struct drm_device *dev,
5398 enum intel_display_power_domain domain)
15d199ea
PZ
5399{
5400 struct drm_i915_private *dev_priv = dev->dev_private;
5401
b97186f0
PZ
5402 if (!HAS_POWER_WELL(dev))
5403 return true;
5404
5405 switch (domain) {
5406 case POWER_DOMAIN_PIPE_A:
5407 case POWER_DOMAIN_TRANSCODER_EDP:
5408 return true;
cdf8dd7f 5409 case POWER_DOMAIN_VGA:
b97186f0
PZ
5410 case POWER_DOMAIN_PIPE_B:
5411 case POWER_DOMAIN_PIPE_C:
5412 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5413 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5414 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5415 case POWER_DOMAIN_TRANSCODER_A:
5416 case POWER_DOMAIN_TRANSCODER_B:
5417 case POWER_DOMAIN_TRANSCODER_C:
15d199ea 5418 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6aedd1f5 5419 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
b97186f0
PZ
5420 default:
5421 BUG();
5422 }
15d199ea
PZ
5423}
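Callers are expected to consult this check before touching registers that sit behind the power well. A minimal, hypothetical sketch of such a guard, reusing intel_display_power_enabled() and the POWER_DOMAIN_* values above; read_pipe_b_stat() is an invented helper name, not a function in this file:

static u32 read_pipe_b_stat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Register contents are not meaningful while the well backing
	 * pipe B is down, so skip the access entirely in that case. */
	if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE_B))
		return 0;

	return I915_READ(PIPESTAT(PIPE_B));
}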
5424
a38911a3 5425static void __intel_set_power_well(struct drm_device *dev, bool enable)
d0d3e513
ED
5426{
5427 struct drm_i915_private *dev_priv = dev->dev_private;
fa42e23c
PZ
5428 bool is_enabled, enable_requested;
5429 uint32_t tmp;
d0d3e513 5430
fa42e23c 5431 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6aedd1f5
PZ
5432 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5433 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
d0d3e513 5434
fa42e23c
PZ
5435 if (enable) {
5436 if (!enable_requested)
6aedd1f5
PZ
5437 I915_WRITE(HSW_PWR_WELL_DRIVER,
5438 HSW_PWR_WELL_ENABLE_REQUEST);
d0d3e513 5439
fa42e23c
PZ
5440 if (!is_enabled) {
5441 DRM_DEBUG_KMS("Enabling power well\n");
5442 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6aedd1f5 5443 HSW_PWR_WELL_STATE_ENABLED), 20))
fa42e23c
PZ
5444 DRM_ERROR("Timeout enabling power well\n");
5445 }
5446 } else {
5447 if (enable_requested) {
9dbd8feb
PZ
5448 unsigned long irqflags;
5449 enum pipe p;
5450
fa42e23c 5451 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
9dbd8feb 5452 POSTING_READ(HSW_PWR_WELL_DRIVER);
fa42e23c 5453 DRM_DEBUG_KMS("Requesting to disable the power well\n");
9dbd8feb
PZ
5454
5455 /*
5456 * After this, the registers on the pipes that are part
5457 * of the power well will become zero, so we have to
5458 * adjust our counters according to that.
5459 *
5460 * FIXME: Should we do this in general in
5461 * drm_vblank_post_modeset?
5462 */
5463 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5464 for_each_pipe(p)
5465 if (p != PIPE_A)
5380e929 5466 dev->vblank[p].last = 0;
9dbd8feb 5467 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
d0d3e513
ED
5468 }
5469 }
fa42e23c 5470}
d0d3e513 5471
2d66aef5
VS
5472static void __intel_power_well_get(struct i915_power_well *power_well)
5473{
5474 if (!power_well->count++)
5475 __intel_set_power_well(power_well->device, true);
5476}
5477
5478static void __intel_power_well_put(struct i915_power_well *power_well)
5479{
5480 WARN_ON(!power_well->count);
5481 if (!--power_well->count)
5482 __intel_set_power_well(power_well->device, false);
5483}
5484
6765625e
VS
5485void intel_display_power_get(struct drm_device *dev,
5486 enum intel_display_power_domain domain)
5487{
5488 struct drm_i915_private *dev_priv = dev->dev_private;
5489 struct i915_power_well *power_well = &dev_priv->power_well;
5490
5491 if (!HAS_POWER_WELL(dev))
5492 return;
5493
5494 switch (domain) {
5495 case POWER_DOMAIN_PIPE_A:
5496 case POWER_DOMAIN_TRANSCODER_EDP:
5497 return;
cdf8dd7f 5498 case POWER_DOMAIN_VGA:
6765625e
VS
5499 case POWER_DOMAIN_PIPE_B:
5500 case POWER_DOMAIN_PIPE_C:
5501 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5502 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5503 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5504 case POWER_DOMAIN_TRANSCODER_A:
5505 case POWER_DOMAIN_TRANSCODER_B:
5506 case POWER_DOMAIN_TRANSCODER_C:
5507 spin_lock_irq(&power_well->lock);
2d66aef5 5508 __intel_power_well_get(power_well);
6765625e
VS
5509 spin_unlock_irq(&power_well->lock);
5510 return;
5511 default:
5512 BUG();
5513 }
5514}
5515
5516void intel_display_power_put(struct drm_device *dev,
5517 enum intel_display_power_domain domain)
5518{
5519 struct drm_i915_private *dev_priv = dev->dev_private;
5520 struct i915_power_well *power_well = &dev_priv->power_well;
5521
5522 if (!HAS_POWER_WELL(dev))
5523 return;
5524
5525 switch (domain) {
5526 case POWER_DOMAIN_PIPE_A:
5527 case POWER_DOMAIN_TRANSCODER_EDP:
5528 return;
cdf8dd7f 5529 case POWER_DOMAIN_VGA:
6765625e
VS
5530 case POWER_DOMAIN_PIPE_B:
5531 case POWER_DOMAIN_PIPE_C:
5532 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5533 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5534 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5535 case POWER_DOMAIN_TRANSCODER_A:
5536 case POWER_DOMAIN_TRANSCODER_B:
5537 case POWER_DOMAIN_TRANSCODER_C:
5538 spin_lock_irq(&power_well->lock);
2d66aef5 5539 __intel_power_well_put(power_well);
6765625e
VS
5540 spin_unlock_irq(&power_well->lock);
5541 return;
5542 default:
5543 BUG();
5544 }
5545}
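These two entry points are meant to be called in balanced pairs per domain: the first get for a domain inside the well takes the reference that powers it up, and the last put releases it again. A hypothetical caller might look like the sketch below; do_pipe_b_work() is an invented name used only for illustration:

static void do_pipe_b_work(struct drm_device *dev)
{
	/* First reference for the well turns it on. */
	intel_display_power_get(dev, POWER_DOMAIN_PIPE_B);

	/* ... safely access pipe B / transcoder B registers here ... */

	/* Dropping the last reference turns the well back off. */
	intel_display_power_put(dev, POWER_DOMAIN_PIPE_B);
}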
5546
a38911a3
WX
5547static struct i915_power_well *hsw_pwr;
5548
5549/* Display audio driver power well request */
5550void i915_request_power_well(void)
5551{
5552 if (WARN_ON(!hsw_pwr))
5553 return;
5554
5555 spin_lock_irq(&hsw_pwr->lock);
2d66aef5 5556 __intel_power_well_get(hsw_pwr);
a38911a3
WX
5557 spin_unlock_irq(&hsw_pwr->lock);
5558}
5559EXPORT_SYMBOL_GPL(i915_request_power_well);
5560
5561/* Display audio driver power well release */
5562void i915_release_power_well(void)
5563{
5564 if (WARN_ON(!hsw_pwr))
5565 return;
5566
5567 spin_lock_irq(&hsw_pwr->lock);
2d66aef5 5568 __intel_power_well_put(hsw_pwr);
a38911a3
WX
5569 spin_unlock_irq(&hsw_pwr->lock);
5570}
5571EXPORT_SYMBOL_GPL(i915_release_power_well);
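The two exported symbols above give an external module (the display audio driver is the intended user) the same reference-counted access without needing a drm_device pointer at all. A hedged sketch of how such a module would pair them; example_codec_powerup() is an invented name:

/* In an external module linking against these exports. */
static void example_codec_powerup(void)
{
	i915_request_power_well();	/* hold the display power well */

	/* ... program HDMI/DP audio registers while the well is up ... */

	i915_release_power_well();	/* drop the reference again */
}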
5572
5573int i915_init_power_well(struct drm_device *dev)
5574{
5575 struct drm_i915_private *dev_priv = dev->dev_private;
5576
5577 hsw_pwr = &dev_priv->power_well;
5578
5579 hsw_pwr->device = dev;
5580 spin_lock_init(&hsw_pwr->lock);
5581 hsw_pwr->count = 0;
5582
5583 return 0;
5584}
5585
5586void i915_remove_power_well(struct drm_device *dev)
5587{
5588 hsw_pwr = NULL;
5589}
5590
5591void intel_set_power_well(struct drm_device *dev, bool enable)
5592{
5593 struct drm_i915_private *dev_priv = dev->dev_private;
5594 struct i915_power_well *power_well = &dev_priv->power_well;
5595
5596 if (!HAS_POWER_WELL(dev))
5597 return;
5598
5599 if (!i915_disable_power_well && !enable)
5600 return;
5601
5602 spin_lock_irq(&power_well->lock);
9cdb826c
VS
5603
5604 /*
5605 * This function will only ever contribute one
5606 * to the power well reference count. i915_request
5607 * is what tracks whether we have or have not
5608 * added the one to the reference count.
5609 */
5610 if (power_well->i915_request == enable)
5611 goto out;
5612
a38911a3
WX
5613 power_well->i915_request = enable;
5614
2d66aef5
VS
5615 if (enable)
5616 __intel_power_well_get(power_well);
5617 else
5618 __intel_power_well_put(power_well);
a38911a3 5619
9cdb826c
VS
5620 out:
5621 spin_unlock_irq(&power_well->lock);
5622}
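A worked call sequence may make the i915_request bookkeeping clearer. Assuming the well starts with count == 0 and i915_request == false, that the i915_disable_power_well module parameter permits disabling, and that nothing else holds a reference:

intel_set_power_well(dev, true);  /* i915_request false->true, count 0->1, well powers up */
intel_set_power_well(dev, true);  /* i915_request already true: no-op, still one reference */
intel_set_power_well(dev, false); /* i915_request true->false, count 1->0, well powers down */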
5623
51340990 5624static void intel_resume_power_well(struct drm_device *dev)
9cdb826c
VS
5625{
5626 struct drm_i915_private *dev_priv = dev->dev_private;
5627 struct i915_power_well *power_well = &dev_priv->power_well;
5628
5629 if (!HAS_POWER_WELL(dev))
5630 return;
5631
5632 spin_lock_irq(&power_well->lock);
5633 __intel_set_power_well(dev, power_well->count > 0);
a38911a3
WX
5634 spin_unlock_irq(&power_well->lock);
5635}
5636
fa42e23c
PZ
5637/*
5638 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5639 * when not needed anymore. We have 4 registers that can request the power well
5640 * to be enabled, and it will only be disabled if none of the registers is
5641 * requesting it to be enabled.
d0d3e513 5642 */
fa42e23c 5643void intel_init_power_well(struct drm_device *dev)
d0d3e513
ED
5644{
5645 struct drm_i915_private *dev_priv = dev->dev_private;
d0d3e513 5646
86d52df6 5647 if (!HAS_POWER_WELL(dev))
d0d3e513
ED
5648 return;
5649
fa42e23c
PZ
5650 /* For now, we need the power well to be always enabled. */
5651 intel_set_power_well(dev, true);
9cdb826c 5652 intel_resume_power_well(dev);
d0d3e513 5653
fa42e23c
PZ
5654 /* We're taking over the BIOS, so clear any requests made by it since
5655 * the driver is in charge now. */
6aedd1f5 5656 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
fa42e23c 5657 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
d0d3e513
ED
5658}
5659
c67a470b
PZ
5660/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
5661void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5662{
5663 hsw_disable_package_c8(dev_priv);
5664}
5665
5666void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5667{
5668 hsw_enable_package_c8(dev_priv);
5669}
5670
1fa61106
ED
5671/* Set up chip specific power management-related functions */
5672void intel_init_pm(struct drm_device *dev)
5673{
5674 struct drm_i915_private *dev_priv = dev->dev_private;
5675
5676 if (I915_HAS_FBC(dev)) {
5677 if (HAS_PCH_SPLIT(dev)) {
5678 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
891348b2 5679 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
abe959c7
RV
5680 dev_priv->display.enable_fbc =
5681 gen7_enable_fbc;
5682 else
5683 dev_priv->display.enable_fbc =
5684 ironlake_enable_fbc;
1fa61106
ED
5685 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5686 } else if (IS_GM45(dev)) {
5687 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5688 dev_priv->display.enable_fbc = g4x_enable_fbc;
5689 dev_priv->display.disable_fbc = g4x_disable_fbc;
5690 } else if (IS_CRESTLINE(dev)) {
5691 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5692 dev_priv->display.enable_fbc = i8xx_enable_fbc;
5693 dev_priv->display.disable_fbc = i8xx_disable_fbc;
5694 }
5695 /* 855GM needs testing */
5696 }
5697
c921aba8
DV
5698 /* For cxsr */
5699 if (IS_PINEVIEW(dev))
5700 i915_pineview_get_mem_freq(dev);
5701 else if (IS_GEN5(dev))
5702 i915_ironlake_get_mem_freq(dev);
5703
1fa61106
ED
5704 /* For FIFO watermark updates */
5705 if (HAS_PCH_SPLIT(dev)) {
53615a5e
VS
5706 intel_setup_wm_latency(dev);
5707
1fa61106 5708 if (IS_GEN5(dev)) {
53615a5e
VS
5709 if (dev_priv->wm.pri_latency[1] &&
5710 dev_priv->wm.spr_latency[1] &&
5711 dev_priv->wm.cur_latency[1])
1fa61106
ED
5712 dev_priv->display.update_wm = ironlake_update_wm;
5713 else {
5714 DRM_DEBUG_KMS("Failed to get proper latency. "
5715 "Disable CxSR\n");
5716 dev_priv->display.update_wm = NULL;
5717 }
5718 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
5719 } else if (IS_GEN6(dev)) {
53615a5e
VS
5720 if (dev_priv->wm.pri_latency[0] &&
5721 dev_priv->wm.spr_latency[0] &&
5722 dev_priv->wm.cur_latency[0]) {
1fa61106
ED
5723 dev_priv->display.update_wm = sandybridge_update_wm;
5724 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5725 } else {
5726 DRM_DEBUG_KMS("Failed to read display plane latency. "
5727 "Disable CxSR\n");
5728 dev_priv->display.update_wm = NULL;
5729 }
5730 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
5731 } else if (IS_IVYBRIDGE(dev)) {
53615a5e
VS
5732 if (dev_priv->wm.pri_latency[0] &&
5733 dev_priv->wm.spr_latency[0] &&
5734 dev_priv->wm.cur_latency[0]) {
c43d0188 5735 dev_priv->display.update_wm = ivybridge_update_wm;
1fa61106
ED
5736 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5737 } else {
5738 DRM_DEBUG_KMS("Failed to read display plane latency. "
5739 "Disable CxSR\n");
5740 dev_priv->display.update_wm = NULL;
5741 }
5742 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6b8a5eeb 5743 } else if (IS_HASWELL(dev)) {
53615a5e
VS
5744 if (dev_priv->wm.pri_latency[0] &&
5745 dev_priv->wm.spr_latency[0] &&
5746 dev_priv->wm.cur_latency[0]) {
1011d8c4 5747 dev_priv->display.update_wm = haswell_update_wm;
526682e9
PZ
5748 dev_priv->display.update_sprite_wm =
5749 haswell_update_sprite_wm;
6b8a5eeb
ED
5750 } else {
5751 DRM_DEBUG_KMS("Failed to read display plane latency. "
5752 "Disable CxSR\n");
5753 dev_priv->display.update_wm = NULL;
5754 }
cad2a2d7 5755 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
1fa61106
ED
5756 } else
5757 dev_priv->display.update_wm = NULL;
5758 } else if (IS_VALLEYVIEW(dev)) {
5759 dev_priv->display.update_wm = valleyview_update_wm;
5760 dev_priv->display.init_clock_gating =
5761 valleyview_init_clock_gating;
1fa61106
ED
5762 } else if (IS_PINEVIEW(dev)) {
5763 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5764 dev_priv->is_ddr3,
5765 dev_priv->fsb_freq,
5766 dev_priv->mem_freq)) {
5767 DRM_INFO("failed to find known CxSR latency "
5768 "(found ddr%s fsb freq %d, mem freq %d), "
5769 "disabling CxSR\n",
5770 (dev_priv->is_ddr3 == 1) ? "3" : "2",
5771 dev_priv->fsb_freq, dev_priv->mem_freq);
5772 /* Disable CxSR and never update its watermark again */
5773 pineview_disable_cxsr(dev);
5774 dev_priv->display.update_wm = NULL;
5775 } else
5776 dev_priv->display.update_wm = pineview_update_wm;
5777 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5778 } else if (IS_G4X(dev)) {
5779 dev_priv->display.update_wm = g4x_update_wm;
5780 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
5781 } else if (IS_GEN4(dev)) {
5782 dev_priv->display.update_wm = i965_update_wm;
5783 if (IS_CRESTLINE(dev))
5784 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
5785 else if (IS_BROADWATER(dev))
5786 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
5787 } else if (IS_GEN3(dev)) {
5788 dev_priv->display.update_wm = i9xx_update_wm;
5789 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
5790 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5791 } else if (IS_I865G(dev)) {
5792 dev_priv->display.update_wm = i830_update_wm;
5793 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5794 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5795 } else if (IS_I85X(dev)) {
5796 dev_priv->display.update_wm = i9xx_update_wm;
5797 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
5798 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5799 } else {
5800 dev_priv->display.update_wm = i830_update_wm;
5801 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5802 if (IS_845G(dev))
5803 dev_priv->display.get_fifo_size = i845_get_fifo_size;
5804 else
5805 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5806 }
5807}
5808
42c0526c
BW
5809int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5810{
4fc688ce 5811 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
42c0526c
BW
5812
5813 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5814 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
5815 return -EAGAIN;
5816 }
5817
5818 I915_WRITE(GEN6_PCODE_DATA, *val);
5819 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5820
5821 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5822 500)) {
5823 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
5824 return -ETIMEDOUT;
5825 }
5826
5827 *val = I915_READ(GEN6_PCODE_DATA);
5828 I915_WRITE(GEN6_PCODE_DATA, 0);
5829
5830 return 0;
5831}
5832
5833int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
5834{
4fc688ce 5835 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
42c0526c
BW
5836
5837 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5838 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
5839 return -EAGAIN;
5840 }
5841
5842 I915_WRITE(GEN6_PCODE_DATA, val);
5843 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5844
5845 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5846 500)) {
5847 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
5848 return -ETIMEDOUT;
5849 }
5850
5851 I915_WRITE(GEN6_PCODE_DATA, 0);
5852
5853 return 0;
5854}
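Both helpers implement the same mailbox handshake: bail out with -EAGAIN if GEN6_PCODE_READY is still set from an earlier request, load GEN6_PCODE_DATA, write the command to GEN6_PCODE_MAILBOX with the READY bit set, then poll up to 500 ms for the firmware to clear READY (a read additionally returns its result through GEN6_PCODE_DATA). A hypothetical caller, taking rps.hw_lock as the WARN_ON requires; example_pcode_query() and whatever mbox value is passed in are illustrative only:

static int example_pcode_query(struct drm_i915_private *dev_priv, u8 mbox)
{
	u32 val = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, mbox, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret)
		DRM_DEBUG_DRIVER("pcode read of mbox 0x%x failed: %d\n", mbox, ret);
	else
		DRM_DEBUG_DRIVER("pcode mbox 0x%x -> 0x%08x\n", mbox, val);

	return ret;
}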
a0e4e199 5855
855ba3be
JB
5856int vlv_gpu_freq(int ddr_freq, int val)
5857{
5858 int mult, base;
5859
5860 switch (ddr_freq) {
5861 case 800:
5862 mult = 20;
5863 base = 120;
5864 break;
5865 case 1066:
5866 mult = 22;
5867 base = 133;
5868 break;
5869 case 1333:
5870 mult = 21;
5871 base = 125;
5872 break;
5873 default:
5874 return -1;
5875 }
5876
5877 return ((val - 0xbd) * mult) + base;
5878}
5879
5880int vlv_freq_opcode(int ddr_freq, int val)
5881{
5882 int mult, base;
5883
5884 switch (ddr_freq) {
5885 case 800:
5886 mult = 20;
5887 base = 120;
5888 break;
5889 case 1066:
5890 mult = 22;
5891 base = 133;
5892 break;
5893 case 1333:
5894 mult = 21;
5895 base = 125;
5896 break;
5897 default:
5898 return -1;
5899 }
5900
5901 val /= mult;
5902 val -= base / mult;
5903 val += 0xbd;
5904
5905 if (val > 0xea)
5906 val = 0xea;
5907
5908 return val;
5909}
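The two conversions are inverses of each other over the encodable range: frequency = (opcode - 0xbd) * mult + base, and the opcode is recovered by dividing back down. A small standalone illustration of the ddr_freq == 800 case (mult = 20, base = 120), following directly from the tables above; this is a userspace toy, not driver code:

#include <stdio.h>

int main(void)
{
	int mult = 20, base = 120;			/* the ddr_freq == 800 row */
	int opcode = 0xc0;
	int freq = ((opcode - 0xbd) * mult) + base;	/* 3 * 20 + 120 = 180 MHz */
	int back = (freq / mult) - (base / mult) + 0xbd;	/* 9 - 6 + 0xbd = 0xc0 */

	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n", opcode, freq, back);
	return 0;
}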
5910
907b28c5
CW
5911void intel_pm_init(struct drm_device *dev)
5912{
5913 struct drm_i915_private *dev_priv = dev->dev_private;
5914
5915 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5916 intel_gen6_powersave_work);
5917}
5918