1 /*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36 * DOC: runtime pm
37 *
38 * The i915 driver supports dynamic enabling and disabling of entire hardware
39 * blocks at runtime. This is especially important on the display side, where
40 * software is supposed to control many power gates manually on recent hardware,
41 * whereas on the GT side most power management is done by the hardware itself.
42 * Even there, though, some manual control at the device level is required.
43 *
44 * Since i915 supports a diverse set of platforms with a unified codebase and
45 * hardware engineers just love to shuffle functionality around between power
46 * domains, there's a sizeable amount of indirection required. This file provides
47 * generic functions to the driver for grabbing and releasing references for
48 * abstract power domains. It then maps those to the actual power wells
49 * present for a given platform.
50 */
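
/*
 * A minimal usage sketch (illustrative only, not part of the driver): code
 * that needs a hardware block powered up brackets the access with a get/put
 * pair on the innermost domain it needs. The helper name below is
 * hypothetical; intel_display_power_get()/intel_display_power_put() are
 * defined later in this file.
 */
#if 0
static void example_touch_pipe_a(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
	/* ... pipe A registers can be safely accessed here ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
#endif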
51
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 int power_well_id);
54
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
57
58 const char *
59 intel_display_power_domain_str(enum intel_display_power_domain domain)
60 {
61 switch (domain) {
62 case POWER_DOMAIN_PIPE_A:
63 return "PIPE_A";
64 case POWER_DOMAIN_PIPE_B:
65 return "PIPE_B";
66 case POWER_DOMAIN_PIPE_C:
67 return "PIPE_C";
68 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
69 return "PIPE_A_PANEL_FITTER";
70 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
71 return "PIPE_B_PANEL_FITTER";
72 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
73 return "PIPE_C_PANEL_FITTER";
74 case POWER_DOMAIN_TRANSCODER_A:
75 return "TRANSCODER_A";
76 case POWER_DOMAIN_TRANSCODER_B:
77 return "TRANSCODER_B";
78 case POWER_DOMAIN_TRANSCODER_C:
79 return "TRANSCODER_C";
80 case POWER_DOMAIN_TRANSCODER_EDP:
81 return "TRANSCODER_EDP";
82 case POWER_DOMAIN_TRANSCODER_DSI_A:
83 return "TRANSCODER_DSI_A";
84 case POWER_DOMAIN_TRANSCODER_DSI_C:
85 return "TRANSCODER_DSI_C";
86 case POWER_DOMAIN_PORT_DDI_A_LANES:
87 return "PORT_DDI_A_LANES";
88 case POWER_DOMAIN_PORT_DDI_B_LANES:
89 return "PORT_DDI_B_LANES";
90 case POWER_DOMAIN_PORT_DDI_C_LANES:
91 return "PORT_DDI_C_LANES";
92 case POWER_DOMAIN_PORT_DDI_D_LANES:
93 return "PORT_DDI_D_LANES";
94 case POWER_DOMAIN_PORT_DDI_E_LANES:
95 return "PORT_DDI_E_LANES";
96 case POWER_DOMAIN_PORT_DSI:
97 return "PORT_DSI";
98 case POWER_DOMAIN_PORT_CRT:
99 return "PORT_CRT";
100 case POWER_DOMAIN_PORT_OTHER:
101 return "PORT_OTHER";
102 case POWER_DOMAIN_VGA:
103 return "VGA";
104 case POWER_DOMAIN_AUDIO:
105 return "AUDIO";
106 case POWER_DOMAIN_PLLS:
107 return "PLLS";
108 case POWER_DOMAIN_AUX_A:
109 return "AUX_A";
110 case POWER_DOMAIN_AUX_B:
111 return "AUX_B";
112 case POWER_DOMAIN_AUX_C:
113 return "AUX_C";
114 case POWER_DOMAIN_AUX_D:
115 return "AUX_D";
116 case POWER_DOMAIN_GMBUS:
117 return "GMBUS";
118 case POWER_DOMAIN_INIT:
119 return "INIT";
120 case POWER_DOMAIN_MODESET:
121 return "MODESET";
122 default:
123 MISSING_CASE(domain);
124 return "?";
125 }
126 }
127
128 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
129 struct i915_power_well *power_well)
130 {
131 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
132 power_well->ops->enable(dev_priv, power_well);
133 power_well->hw_enabled = true;
134 }
135
136 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
137 struct i915_power_well *power_well)
138 {
139 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
140 power_well->hw_enabled = false;
141 power_well->ops->disable(dev_priv, power_well);
142 }
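
/*
 * Note the ordering in the two helpers above: ->hw_enabled is set only
 * after ->enable() has completed, and cleared before ->disable() starts.
 * This keeps the unlocked reader in __intel_display_power_is_enabled()
 * conservative: during a transition the well reads as disabled, never as
 * enabled while the hardware is (still) off.
 */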
143
144 static void intel_power_well_get(struct drm_i915_private *dev_priv,
145 struct i915_power_well *power_well)
146 {
147 if (!power_well->count++)
148 intel_power_well_enable(dev_priv, power_well);
149 }
150
151 static void intel_power_well_put(struct drm_i915_private *dev_priv,
152 struct i915_power_well *power_well)
153 {
154 WARN(!power_well->count, "Use count on power well %s is already zero",
155 power_well->name);
156
157 if (!--power_well->count)
158 intel_power_well_disable(dev_priv, power_well);
159 }
160
161 /*
162 * We should only use the power well if we explicitly asked the hardware to
163 * enable it, so check if it's enabled and also check if we've requested it to
164 * be enabled.
165 */
166 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
167 struct i915_power_well *power_well)
168 {
169 return I915_READ(HSW_PWR_WELL_DRIVER) ==
170 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
171 }
172
173 /**
174 * __intel_display_power_is_enabled - unlocked check for a power domain
175 * @dev_priv: i915 device instance
176 * @domain: power domain to check
177 *
178 * This is the unlocked version of intel_display_power_is_enabled() and should
179 * only be used from error capture and recovery code where deadlocks are
180 * possible.
181 *
182 * Returns:
183 * True when the power domain is enabled, false otherwise.
184 */
185 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
186 enum intel_display_power_domain domain)
187 {
188 struct i915_power_well *power_well;
189 bool is_enabled;
190
191 if (dev_priv->pm.suspended)
192 return false;
193
194 is_enabled = true;
195
196 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
197 if (power_well->always_on)
198 continue;
199
200 if (!power_well->hw_enabled) {
201 is_enabled = false;
202 break;
203 }
204 }
205
206 return is_enabled;
207 }
208
209 /**
210 * intel_display_power_is_enabled - check for a power domain
211 * @dev_priv: i915 device instance
212 * @domain: power domain to check
213 *
214 * This function can be used to check the hw power domain state. It is mostly
215 * used in hardware state readout functions. Everywhere else code should rely
216 * upon explicit power domain reference counting to ensure that the hardware
217 * block is powered up before accessing it.
218 *
219 * Callers must hold the relevant modesetting locks to ensure that concurrent
220 * threads can't disable the power well while the caller tries to read a few
221 * registers.
222 *
223 * Returns:
224 * True when the power domain is enabled, false otherwise.
225 */
226 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
227 enum intel_display_power_domain domain)
228 {
229 struct i915_power_domains *power_domains;
230 bool ret;
231
232 power_domains = &dev_priv->power_domains;
233
234 mutex_lock(&power_domains->lock);
235 ret = __intel_display_power_is_enabled(dev_priv, domain);
236 mutex_unlock(&power_domains->lock);
237
238 return ret;
239 }
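
/*
 * Usage sketch (illustrative only): hardware state readout typically bails
 * out early when the relevant domain is powered down, instead of taking a
 * new power reference. The function below is hypothetical.
 */
#if 0
static bool example_get_hw_state(struct drm_i915_private *dev_priv)
{
	/* caller holds the relevant modeset locks */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		return false;	/* block is off, registers are not readable */

	/* ... read out pipe A registers here ... */
	return true;
}
#endif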
240
241 /**
242 * intel_display_set_init_power - set the initial power domain state
243 * @dev_priv: i915 device instance
244 * @enable: whether to enable or disable the initial power domain state
245 *
246 * For simplicity our driver load/unload and system suspend/resume code assumes
247 * that all power domains are always enabled. This function controls the state
248 * of this little hack. While the initial power domain state is enabled, runtime
249 * pm is effectively disabled.
250 */
251 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
252 bool enable)
253 {
254 if (dev_priv->power_domains.init_power_on == enable)
255 return;
256
257 if (enable)
258 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
259 else
260 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
261
262 dev_priv->power_domains.init_power_on = enable;
263 }
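
/*
 * Illustrative sketch of the intended calling pattern (an assumption based
 * on the comment above, not a verbatim call site): load/resume paths force
 * everything on first, then drop the hack once real domain references exist.
 */
#if 0
static void example_resume_display(struct drm_i915_private *dev_priv)
{
	intel_display_set_init_power(dev_priv, true);
	/* ... program hardware, take the real power domain references ... */
	intel_display_set_init_power(dev_priv, false);
}
#endif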
264
265 /*
266 * Starting with Haswell, we have a "Power Down Well" that can be turned off
267 * when not needed anymore. We have 4 requester registers (BIOS, DRIVER, KVMR
268 * and DEBUG) that can request the power well to be enabled, and the well is
269 * only disabled once none of them requests it to be enabled.
270 */
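
/*
 * Sketch (illustrative only): the effective request is the OR of all four
 * requester registers, so the well can only go down once every requester
 * has cleared its bit. The register names match those used later in this
 * file.
 */
#if 0
static bool example_hsw_any_requester(struct drm_i915_private *dev_priv)
{
	u32 req = I915_READ(HSW_PWR_WELL_BIOS) |
		  I915_READ(HSW_PWR_WELL_DRIVER) |
		  I915_READ(HSW_PWR_WELL_KVMR) |
		  I915_READ(HSW_PWR_WELL_DEBUG);

	return req & HSW_PWR_WELL_ENABLE_REQUEST;
}
#endif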
271 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
272 {
273 struct pci_dev *pdev = dev_priv->drm.pdev;
274
275 /*
276 * After we re-enable the power well, if we touch VGA register 0x3d5
277 * we'll get unclaimed register interrupts. This stops after we write
278 * anything to the VGA MSR register. The vgacon module uses this
279 * register all the time, so if we unbind our driver and, as a
280 * consequence, bind vgacon, we'll get stuck in an infinite loop at
281 * console_unlock(). So here we touch the VGA MSR register, making
282 * sure vgacon can keep working normally without triggering interrupts
283 * and error messages.
284 */
285 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
286 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
287 vga_put(pdev, VGA_RSRC_LEGACY_IO);
288
289 if (IS_BROADWELL(dev_priv))
290 gen8_irq_power_well_post_enable(dev_priv,
291 1 << PIPE_C | 1 << PIPE_B);
292 }
293
294 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
295 {
296 if (IS_BROADWELL(dev_priv))
297 gen8_irq_power_well_pre_disable(dev_priv,
298 1 << PIPE_C | 1 << PIPE_B);
299 }
300
301 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
302 struct i915_power_well *power_well)
303 {
304 struct pci_dev *pdev = dev_priv->drm.pdev;
305
306 /*
307 * After we re-enable the power well, if we touch VGA register 0x3d5
308 * we'll get unclaimed register interrupts. This stops after we write
309 * anything to the VGA MSR register. The vgacon module uses this
310 * register all the time, so if we unbind our driver and, as a
311 * consequence, bind vgacon, we'll get stuck in an infinite loop at
312 * console_unlock(). So here we touch the VGA MSR register, making
313 * sure vgacon can keep working normally without triggering interrupts
314 * and error messages.
315 */
316 if (power_well->id == SKL_DISP_PW_2) {
317 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
318 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
319 vga_put(pdev, VGA_RSRC_LEGACY_IO);
320
321 gen8_irq_power_well_post_enable(dev_priv,
322 1 << PIPE_C | 1 << PIPE_B);
323 }
324 }
325
326 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
327 struct i915_power_well *power_well)
328 {
329 if (power_well->id == SKL_DISP_PW_2)
330 gen8_irq_power_well_pre_disable(dev_priv,
331 1 << PIPE_C | 1 << PIPE_B);
332 }
333
334 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
335 struct i915_power_well *power_well, bool enable)
336 {
337 bool is_enabled, enable_requested;
338 uint32_t tmp;
339
340 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
341 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
342 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
343
344 if (enable) {
345 if (!enable_requested)
346 I915_WRITE(HSW_PWR_WELL_DRIVER,
347 HSW_PWR_WELL_ENABLE_REQUEST);
348
349 if (!is_enabled) {
350 DRM_DEBUG_KMS("Enabling power well\n");
351 if (intel_wait_for_register(dev_priv,
352 HSW_PWR_WELL_DRIVER,
353 HSW_PWR_WELL_STATE_ENABLED,
354 HSW_PWR_WELL_STATE_ENABLED,
355 20))
356 DRM_ERROR("Timeout enabling power well\n");
357 hsw_power_well_post_enable(dev_priv);
358 }
359
360 } else {
361 if (enable_requested) {
362 hsw_power_well_pre_disable(dev_priv);
363 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
364 POSTING_READ(HSW_PWR_WELL_DRIVER);
365 DRM_DEBUG_KMS("Requesting to disable the power well\n");
366 }
367 }
368 }
369
370 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
371 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
372 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
373 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
374 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
375 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
376 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
377 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
378 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
379 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
380 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
381 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
382 BIT_ULL(POWER_DOMAIN_AUX_B) | \
383 BIT_ULL(POWER_DOMAIN_AUX_C) | \
384 BIT_ULL(POWER_DOMAIN_AUX_D) | \
385 BIT_ULL(POWER_DOMAIN_AUDIO) | \
386 BIT_ULL(POWER_DOMAIN_VGA) | \
387 BIT_ULL(POWER_DOMAIN_INIT))
388 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
389 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
390 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
391 BIT_ULL(POWER_DOMAIN_INIT))
392 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
393 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
394 BIT_ULL(POWER_DOMAIN_INIT))
395 #define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
396 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
397 BIT_ULL(POWER_DOMAIN_INIT))
398 #define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
399 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
400 BIT_ULL(POWER_DOMAIN_INIT))
401 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
402 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
403 BIT_ULL(POWER_DOMAIN_MODESET) | \
404 BIT_ULL(POWER_DOMAIN_AUX_A) | \
405 BIT_ULL(POWER_DOMAIN_INIT))
406
407 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
408 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
409 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
410 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
411 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
412 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
413 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
414 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
415 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
416 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
417 BIT_ULL(POWER_DOMAIN_AUX_B) | \
418 BIT_ULL(POWER_DOMAIN_AUX_C) | \
419 BIT_ULL(POWER_DOMAIN_AUDIO) | \
420 BIT_ULL(POWER_DOMAIN_VGA) | \
421 BIT_ULL(POWER_DOMAIN_GMBUS) | \
422 BIT_ULL(POWER_DOMAIN_INIT))
423 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
424 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
425 BIT_ULL(POWER_DOMAIN_MODESET) | \
426 BIT_ULL(POWER_DOMAIN_AUX_A) | \
427 BIT_ULL(POWER_DOMAIN_INIT))
428 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
429 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
430 BIT_ULL(POWER_DOMAIN_AUX_A) | \
431 BIT_ULL(POWER_DOMAIN_INIT))
432 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
433 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
434 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
435 BIT_ULL(POWER_DOMAIN_AUX_B) | \
436 BIT_ULL(POWER_DOMAIN_AUX_C) | \
437 BIT_ULL(POWER_DOMAIN_INIT))
438
439 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
440 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
441 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
442 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
443 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
444 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
445 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
446 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
447 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
448 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
449 BIT_ULL(POWER_DOMAIN_AUX_B) | \
450 BIT_ULL(POWER_DOMAIN_AUX_C) | \
451 BIT_ULL(POWER_DOMAIN_AUDIO) | \
452 BIT_ULL(POWER_DOMAIN_VGA) | \
453 BIT_ULL(POWER_DOMAIN_INIT))
454 #define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \
455 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
456 BIT_ULL(POWER_DOMAIN_INIT))
457 #define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \
458 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
459 BIT_ULL(POWER_DOMAIN_INIT))
460 #define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \
461 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
462 BIT_ULL(POWER_DOMAIN_INIT))
463 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
464 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
465 BIT_ULL(POWER_DOMAIN_AUX_A) | \
466 BIT_ULL(POWER_DOMAIN_INIT))
467 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
468 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
469 BIT_ULL(POWER_DOMAIN_AUX_B) | \
470 BIT_ULL(POWER_DOMAIN_INIT))
471 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
472 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
473 BIT_ULL(POWER_DOMAIN_AUX_C) | \
474 BIT_ULL(POWER_DOMAIN_INIT))
475 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
476 BIT_ULL(POWER_DOMAIN_AUX_A) | \
477 BIT_ULL(POWER_DOMAIN_INIT))
478 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
479 BIT_ULL(POWER_DOMAIN_AUX_B) | \
480 BIT_ULL(POWER_DOMAIN_INIT))
481 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
482 BIT_ULL(POWER_DOMAIN_AUX_C) | \
483 BIT_ULL(POWER_DOMAIN_INIT))
484 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
485 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
486 BIT_ULL(POWER_DOMAIN_MODESET) | \
487 BIT_ULL(POWER_DOMAIN_AUX_A) | \
488 BIT_ULL(POWER_DOMAIN_INIT))
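
/*
 * These masks are attached to the platform power well tables further down
 * in this file: a well services a domain iff the corresponding BIT_ULL()
 * is set in its domain mask. A minimal membership check is just a bit test
 * (sketch, assuming the i915_power_well::domains field of this kernel
 * version):
 */
#if 0
static bool example_well_handles_domain(const struct i915_power_well *well,
					enum intel_display_power_domain domain)
{
	return well->domains & BIT_ULL(domain);
}
#endif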
489
490 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
491 {
492 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
493 "DC9 already programmed to be enabled.\n");
494 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
495 "DC5 still not disabled to enable DC9.\n");
496 WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
497 WARN_ONCE(intel_irqs_enabled(dev_priv),
498 "Interrupts not disabled yet.\n");
499
500 /*
501 * TODO: check for the following to verify the conditions to enter DC9
502 * state are satisfied:
503 * 1] Check relevant display engine registers to verify if mode set
504 * disable sequence was followed.
505 * 2] Check if display uninitialize sequence is initialized.
506 */
507 }
508
509 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
510 {
511 WARN_ONCE(intel_irqs_enabled(dev_priv),
512 "Interrupts not disabled yet.\n");
513 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
514 "DC5 still not disabled.\n");
515
516 /*
517 * TODO: check for the following to verify DC9 state was indeed
518 * entered before programming to disable it:
519 * 1] Check relevant display engine registers to verify if mode
520 * set disable sequence was followed.
521 * 2] Check if display uninitialize sequence is initialized.
522 */
523 }
524
525 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
526 u32 state)
527 {
528 int rewrites = 0;
529 int rereads = 0;
530 u32 v;
531
532 I915_WRITE(DC_STATE_EN, state);
533
534 /* It has been observed that disabling the dc6 state sometimes
535 * doesn't stick and the DMC keeps returning the old value. Make sure
536 * the write really sticks by re-reading it enough times, and force a
537 * rewrite until we are confident the state is exactly what we want.
538 */
539 do {
540 v = I915_READ(DC_STATE_EN);
541
542 if (v != state) {
543 I915_WRITE(DC_STATE_EN, state);
544 rewrites++;
545 rereads = 0;
546 } else if (rereads++ > 5) {
547 break;
548 }
549
550 } while (rewrites < 100);
551
552 if (v != state)
553 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
554 state, v);
555
556 /* Most of the time a single retry is enough, so avoid log spam */
557 if (rewrites > 1)
558 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
559 state, rewrites);
560 }
561
562 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
563 {
564 u32 mask;
565
566 mask = DC_STATE_EN_UPTO_DC5;
567 if (IS_GEN9_LP(dev_priv))
568 mask |= DC_STATE_EN_DC9;
569 else
570 mask |= DC_STATE_EN_UPTO_DC6;
571
572 return mask;
573 }
574
575 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
576 {
577 u32 val;
578
579 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
580
581 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
582 dev_priv->csr.dc_state, val);
583 dev_priv->csr.dc_state = val;
584 }
585
586 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
587 {
588 uint32_t val;
589 uint32_t mask;
590
591 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
592 state &= dev_priv->csr.allowed_dc_mask;
593
594 val = I915_READ(DC_STATE_EN);
595 mask = gen9_dc_mask(dev_priv);
596 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
597 val & mask, state);
598
599 /* Check if DMC is ignoring our DC state requests */
600 if ((val & mask) != dev_priv->csr.dc_state)
601 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
602 dev_priv->csr.dc_state, val & mask);
603
604 val &= ~mask;
605 val |= state;
606
607 gen9_write_dc_state(dev_priv, val);
608
609 dev_priv->csr.dc_state = val & mask;
610 }
611
612 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
613 {
614 assert_can_enable_dc9(dev_priv);
615
616 DRM_DEBUG_KMS("Enabling DC9\n");
617
618 intel_power_sequencer_reset(dev_priv);
619 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
620 }
621
622 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
623 {
624 assert_can_disable_dc9(dev_priv);
625
626 DRM_DEBUG_KMS("Disabling DC9\n");
627
628 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
629
630 intel_pps_unlock_regs_wa(dev_priv);
631 }
632
633 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
634 {
635 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
636 "CSR program storage start is NULL\n");
637 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base not set up\n");
638 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP not set up\n");
639 }
640
641 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
642 {
643 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
644 SKL_DISP_PW_2);
645
646 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
647
648 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
649 "DC5 already programmed to be enabled.\n");
650 assert_rpm_wakelock_held(dev_priv);
651
652 assert_csr_loaded(dev_priv);
653 }
654
655 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
656 {
657 assert_can_enable_dc5(dev_priv);
658
659 DRM_DEBUG_KMS("Enabling DC5\n");
660
661 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
662 }
663
664 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
665 {
666 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
667 "Backlight is not disabled.\n");
668 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
669 "DC6 already programmed to be enabled.\n");
670
671 assert_csr_loaded(dev_priv);
672 }
673
674 void skl_enable_dc6(struct drm_i915_private *dev_priv)
675 {
676 assert_can_enable_dc6(dev_priv);
677
678 DRM_DEBUG_KMS("Enabling DC6\n");
679
680 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
681
682 }
683
684 void skl_disable_dc6(struct drm_i915_private *dev_priv)
685 {
686 DRM_DEBUG_KMS("Disabling DC6\n");
687
688 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
689 }
690
691 static void
692 gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
693 struct i915_power_well *power_well)
694 {
695 enum skl_disp_power_wells power_well_id = power_well->id;
696 u32 val;
697 u32 mask;
698
699 mask = SKL_POWER_WELL_REQ(power_well_id);
700
701 val = I915_READ(HSW_PWR_WELL_KVMR);
702 if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
703 power_well->name))
704 I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
705
706 val = I915_READ(HSW_PWR_WELL_BIOS);
707 val |= I915_READ(HSW_PWR_WELL_DEBUG);
708
709 if (!(val & mask))
710 return;
711
712 /*
713 * DMC is known to force on the request bits for power well 1 on SKL
714 * and BXT and the misc IO power well on SKL but we don't expect any
715 * other request bits to be set, so WARN for those.
716 */
717 if (power_well_id == SKL_DISP_PW_1 ||
718 (IS_GEN9_BC(dev_priv) &&
719 power_well_id == SKL_DISP_PW_MISC_IO))
720 DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
721 "by DMC\n", power_well->name);
722 else
723 WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
724 power_well->name);
725
726 I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
727 I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
728 }
729
730 static void skl_set_power_well(struct drm_i915_private *dev_priv,
731 struct i915_power_well *power_well, bool enable)
732 {
733 uint32_t tmp, fuse_status;
734 uint32_t req_mask, state_mask;
735 bool is_enabled, enable_requested, check_fuse_status = false;
736
737 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
738 fuse_status = I915_READ(SKL_FUSE_STATUS);
739
740 switch (power_well->id) {
741 case SKL_DISP_PW_1:
742 if (intel_wait_for_register(dev_priv,
743 SKL_FUSE_STATUS,
744 SKL_FUSE_PG0_DIST_STATUS,
745 SKL_FUSE_PG0_DIST_STATUS,
746 1)) {
747 DRM_ERROR("PG0 not enabled\n");
748 return;
749 }
750 break;
751 case SKL_DISP_PW_2:
752 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
753 DRM_ERROR("PG1 in disabled state\n");
754 return;
755 }
756 break;
757 case SKL_DISP_PW_MISC_IO:
758 case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
759 case SKL_DISP_PW_DDI_B:
760 case SKL_DISP_PW_DDI_C:
761 case SKL_DISP_PW_DDI_D:
762 case GLK_DISP_PW_AUX_A:
763 case GLK_DISP_PW_AUX_B:
764 case GLK_DISP_PW_AUX_C:
765 break;
766 default:
767 WARN(1, "Unknown power well %lu\n", power_well->id);
768 return;
769 }
770
771 req_mask = SKL_POWER_WELL_REQ(power_well->id);
772 enable_requested = tmp & req_mask;
773 state_mask = SKL_POWER_WELL_STATE(power_well->id);
774 is_enabled = tmp & state_mask;
775
776 if (!enable && enable_requested)
777 skl_power_well_pre_disable(dev_priv, power_well);
778
779 if (enable) {
780 if (!enable_requested) {
781 WARN((tmp & state_mask) &&
782 !I915_READ(HSW_PWR_WELL_BIOS),
783 "Invalid for power well status to be enabled, unless done by the BIOS, \
784 when request is to disable!\n");
785 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
786 }
787
788 if (!is_enabled) {
789 DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
790 check_fuse_status = true;
791 }
792 } else {
793 if (enable_requested) {
794 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
795 POSTING_READ(HSW_PWR_WELL_DRIVER);
796 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
797 }
798
799 if (IS_GEN9(dev_priv))
800 gen9_sanitize_power_well_requests(dev_priv, power_well);
801 }
802
803 if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
804 1))
805 DRM_ERROR("%s %s timeout\n",
806 power_well->name, enable ? "enable" : "disable");
807
808 if (check_fuse_status) {
809 if (power_well->id == SKL_DISP_PW_1) {
810 if (intel_wait_for_register(dev_priv,
811 SKL_FUSE_STATUS,
812 SKL_FUSE_PG1_DIST_STATUS,
813 SKL_FUSE_PG1_DIST_STATUS,
814 1))
815 DRM_ERROR("PG1 distributing status timeout\n");
816 } else if (power_well->id == SKL_DISP_PW_2) {
817 if (intel_wait_for_register(dev_priv,
818 SKL_FUSE_STATUS,
819 SKL_FUSE_PG2_DIST_STATUS,
820 SKL_FUSE_PG2_DIST_STATUS,
821 1))
822 DRM_ERROR("PG2 distributing status timeout\n");
823 }
824 }
825
826 if (enable && !is_enabled)
827 skl_power_well_post_enable(dev_priv, power_well);
828 }
829
830 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
831 struct i915_power_well *power_well)
832 {
833 /*
834 * We're taking over the BIOS, so clear any requests made by it since
835 * the driver is in charge now.
836 */
837 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
838 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
839 }
840
841 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
842 struct i915_power_well *power_well)
843 {
844 hsw_set_power_well(dev_priv, power_well, true);
845 }
846
847 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
848 struct i915_power_well *power_well)
849 {
850 hsw_set_power_well(dev_priv, power_well, false);
851 }
852
853 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
854 struct i915_power_well *power_well)
855 {
856 uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
857 SKL_POWER_WELL_STATE(power_well->id);
858
859 return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
860 }
861
862 static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
863 struct i915_power_well *power_well)
864 {
865 /* Clear any request made by BIOS as driver is taking over */
866 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
867 }
868
869 static void skl_power_well_enable(struct drm_i915_private *dev_priv,
870 struct i915_power_well *power_well)
871 {
872 skl_set_power_well(dev_priv, power_well, true);
873 }
874
875 static void skl_power_well_disable(struct drm_i915_private *dev_priv,
876 struct i915_power_well *power_well)
877 {
878 skl_set_power_well(dev_priv, power_well, false);
879 }
880
881 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
882 struct i915_power_well *power_well)
883 {
884 bxt_ddi_phy_init(dev_priv, power_well->data);
885 }
886
887 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
888 struct i915_power_well *power_well)
889 {
890 bxt_ddi_phy_uninit(dev_priv, power_well->data);
891 }
892
893 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
894 struct i915_power_well *power_well)
895 {
896 return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
897 }
898
899 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
900 {
901 struct i915_power_well *power_well;
902
903 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
904 if (power_well->count > 0)
905 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
906
907 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
908 if (power_well->count > 0)
909 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
910
911 if (IS_GEMINILAKE(dev_priv)) {
912 power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
913 if (power_well->count > 0)
914 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
915 }
916 }
917
918 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
919 struct i915_power_well *power_well)
920 {
921 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
922 }
923
924 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
925 {
926 u32 tmp = I915_READ(DBUF_CTL);
927
928 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
929 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
930 "Unexpected DBuf power power state (0x%08x)\n", tmp);
931 }
932
933 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
934 struct i915_power_well *power_well)
935 {
936 struct intel_cdclk_state cdclk_state = {};
937
938 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
939
940 dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
941 WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
942
943 gen9_assert_dbuf_enabled(dev_priv);
944
945 if (IS_GEN9_LP(dev_priv))
946 bxt_verify_ddi_phy_power_wells(dev_priv);
947 }
948
949 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
950 struct i915_power_well *power_well)
951 {
952 if (!dev_priv->csr.dmc_payload)
953 return;
954
955 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
956 skl_enable_dc6(dev_priv);
957 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
958 gen9_enable_dc5(dev_priv);
959 }
960
961 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
962 struct i915_power_well *power_well)
963 {
964 }
965
966 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
967 struct i915_power_well *power_well)
968 {
969 }
970
971 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
972 struct i915_power_well *power_well)
973 {
974 return true;
975 }
976
977 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
978 struct i915_power_well *power_well, bool enable)
979 {
980 enum punit_power_well power_well_id = power_well->id;
981 u32 mask;
982 u32 state;
983 u32 ctrl;
984
985 mask = PUNIT_PWRGT_MASK(power_well_id);
986 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
987 PUNIT_PWRGT_PWR_GATE(power_well_id);
988
989 mutex_lock(&dev_priv->rps.hw_lock);
990
991 #define COND \
992 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
993
994 if (COND)
995 goto out;
996
997 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
998 ctrl &= ~mask;
999 ctrl |= state;
1000 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1001
1002 if (wait_for(COND, 100))
1003 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1004 state,
1005 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1006
1007 #undef COND
1008
1009 out:
1010 mutex_unlock(&dev_priv->rps.hw_lock);
1011 }
1012
1013 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1014 struct i915_power_well *power_well)
1015 {
1016 vlv_set_power_well(dev_priv, power_well, true);
1017 }
1018
1019 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1020 struct i915_power_well *power_well)
1021 {
1022 vlv_set_power_well(dev_priv, power_well, false);
1023 }
1024
1025 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1026 struct i915_power_well *power_well)
1027 {
1028 int power_well_id = power_well->id;
1029 bool enabled = false;
1030 u32 mask;
1031 u32 state;
1032 u32 ctrl;
1033
1034 mask = PUNIT_PWRGT_MASK(power_well_id);
1035 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
1036
1037 mutex_lock(&dev_priv->rps.hw_lock);
1038
1039 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1040 /*
1041 * We only ever set the power-on and power-gate states, anything
1042 * else is unexpected.
1043 */
1044 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
1045 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
1046 if (state == ctrl)
1047 enabled = true;
1048
1049 /*
1050 * A transient state at this point would mean some unexpected party
1051 * is poking at the power controls too.
1052 */
1053 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1054 WARN_ON(ctrl != state);
1055
1056 mutex_unlock(&dev_priv->rps.hw_lock);
1057
1058 return enabled;
1059 }
1060
1061 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1062 {
1063 u32 val;
1064
1065 /*
1066 * On driver load, a pipe may be active and driving a DSI display.
1067 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1068 * (and never recovering) in this case. intel_dsi_post_disable() will
1069 * clear it when we turn off the display.
1070 */
1071 val = I915_READ(DSPCLK_GATE_D);
1072 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1073 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1074 I915_WRITE(DSPCLK_GATE_D, val);
1075
1076 /*
1077 * Disable trickle feed and enable pnd deadline calculation
1078 */
1079 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1080 I915_WRITE(CBR1_VLV, 0);
1081
1082 WARN_ON(dev_priv->rawclk_freq == 0);
1083
1084 I915_WRITE(RAWCLK_FREQ_VLV,
1085 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1086 }
1087
1088 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1089 {
1090 struct intel_encoder *encoder;
1091 enum pipe pipe;
1092
1093 /*
1094 * Enable the CRI clock source so we can get at the
1095 * display and the reference clock for VGA
1096 * hotplug / manual detection. Supposedly DSI also
1097 * needs the ref clock up and running.
1098 *
1099 * CHV DPLL B/C have some issues if VGA mode is enabled.
1100 */
1101 for_each_pipe(dev_priv, pipe) {
1102 u32 val = I915_READ(DPLL(pipe));
1103
1104 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1105 if (pipe != PIPE_A)
1106 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1107
1108 I915_WRITE(DPLL(pipe), val);
1109 }
1110
1111 vlv_init_display_clock_gating(dev_priv);
1112
1113 spin_lock_irq(&dev_priv->irq_lock);
1114 valleyview_enable_display_irqs(dev_priv);
1115 spin_unlock_irq(&dev_priv->irq_lock);
1116
1117 /*
1118 * During driver initialization/resume we can avoid restoring the
1119 * part of the HW/SW state that will be inited anyway explicitly.
1120 */
1121 if (dev_priv->power_domains.initializing)
1122 return;
1123
1124 intel_hpd_init(dev_priv);
1125
1126 /* Re-enable the ADPA, if we have one */
1127 for_each_intel_encoder(&dev_priv->drm, encoder) {
1128 if (encoder->type == INTEL_OUTPUT_ANALOG)
1129 intel_crt_reset(&encoder->base);
1130 }
1131
1132 i915_redisable_vga_power_on(dev_priv);
1133
1134 intel_pps_unlock_regs_wa(dev_priv);
1135 }
1136
1137 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1138 {
1139 spin_lock_irq(&dev_priv->irq_lock);
1140 valleyview_disable_display_irqs(dev_priv);
1141 spin_unlock_irq(&dev_priv->irq_lock);
1142
1143 /* make sure we're done processing display irqs */
1144 synchronize_irq(dev_priv->drm.irq);
1145
1146 intel_power_sequencer_reset(dev_priv);
1147
1148 /* Prevent us from re-enabling polling by accident in late suspend */
1149 if (!dev_priv->drm.dev->power.is_suspended)
1150 intel_hpd_poll_init(dev_priv);
1151 }
1152
1153 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1154 struct i915_power_well *power_well)
1155 {
1156 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1157
1158 vlv_set_power_well(dev_priv, power_well, true);
1159
1160 vlv_display_power_well_init(dev_priv);
1161 }
1162
1163 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1164 struct i915_power_well *power_well)
1165 {
1166 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1167
1168 vlv_display_power_well_deinit(dev_priv);
1169
1170 vlv_set_power_well(dev_priv, power_well, false);
1171 }
1172
1173 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1174 struct i915_power_well *power_well)
1175 {
1176 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1177
1178 /* since ref/cri clock was enabled */
1179 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1180
1181 vlv_set_power_well(dev_priv, power_well, true);
1182
1183 /*
1184 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1185 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1186 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1187 * b. The other bits such as sfr settings / modesel may all
1188 * be set to 0.
1189 *
1190 * This should only be done on init and resume from S3 with
1191 * both PLLs disabled, or we risk losing DPIO and PLL
1192 * synchronization.
1193 */
1194 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1195 }
1196
1197 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1198 struct i915_power_well *power_well)
1199 {
1200 enum pipe pipe;
1201
1202 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1203
1204 for_each_pipe(dev_priv, pipe)
1205 assert_pll_disabled(dev_priv, pipe);
1206
1207 /* Assert common reset */
1208 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1209
1210 vlv_set_power_well(dev_priv, power_well, false);
1211 }
1212
1213 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1214
1215 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1216 int power_well_id)
1217 {
1218 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1219 int i;
1220
1221 for (i = 0; i < power_domains->power_well_count; i++) {
1222 struct i915_power_well *power_well;
1223
1224 power_well = &power_domains->power_wells[i];
1225 if (power_well->id == power_well_id)
1226 return power_well;
1227 }
1228
1229 return NULL;
1230 }
1231
1232 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1233
1234 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1235 {
1236 struct i915_power_well *cmn_bc =
1237 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1238 struct i915_power_well *cmn_d =
1239 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1240 u32 phy_control = dev_priv->chv_phy_control;
1241 u32 phy_status = 0;
1242 u32 phy_status_mask = 0xffffffff;
1243
1244 /*
1245 * The BIOS can leave the PHY in some weird state
1246 * where it doesn't fully power down some parts.
1247 * Disable the asserts until the PHY has been fully
1248 * reset (ie. the power well has been disabled at
1249 * least once).
1250 */
1251 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1252 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1253 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1254 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1255 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1256 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1257 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1258
1259 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1260 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1261 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1262 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1263
1264 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1265 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1266
1267 /* this assumes override is only used to enable lanes */
1268 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1269 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1270
1271 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1272 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1273
1274 /* CL1 is on whenever anything is on in either channel */
1275 if (BITS_SET(phy_control,
1276 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1277 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1278 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1279
1280 /*
1281 * The DPLLB check accounts for the pipe B + port A usage
1282 * with CL2 powered up but all the lanes in the second channel
1283 * powered down.
1284 */
1285 if (BITS_SET(phy_control,
1286 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1287 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1288 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1289
1290 if (BITS_SET(phy_control,
1291 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1292 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1293 if (BITS_SET(phy_control,
1294 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1295 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1296
1297 if (BITS_SET(phy_control,
1298 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1299 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1300 if (BITS_SET(phy_control,
1301 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1302 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1303 }
1304
1305 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1306 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1307
1308 /* this assumes override is only used to enable lanes */
1309 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1310 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1311
1312 if (BITS_SET(phy_control,
1313 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1314 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1315
1316 if (BITS_SET(phy_control,
1317 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1318 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1319 if (BITS_SET(phy_control,
1320 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1321 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1322 }
1323
1324 phy_status &= phy_status_mask;
1325
1326 /*
1327 * The PHY may be busy with some initial calibration and whatnot,
1328 * so the power state can take a while to actually change.
1329 */
1330 if (intel_wait_for_register(dev_priv,
1331 DISPLAY_PHY_STATUS,
1332 phy_status_mask,
1333 phy_status,
1334 10))
1335 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1336 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1337 phy_status, dev_priv->chv_phy_control);
1338 }
1339
1340 #undef BITS_SET
1341
1342 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1343 struct i915_power_well *power_well)
1344 {
1345 enum dpio_phy phy;
1346 enum pipe pipe;
1347 uint32_t tmp;
1348
1349 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1350 power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1351
1352 if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1353 pipe = PIPE_A;
1354 phy = DPIO_PHY0;
1355 } else {
1356 pipe = PIPE_C;
1357 phy = DPIO_PHY1;
1358 }
1359
1360 /* since ref/cri clock was enabled */
1361 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1362 vlv_set_power_well(dev_priv, power_well, true);
1363
1364 /* Poll for phypwrgood signal */
1365 if (intel_wait_for_register(dev_priv,
1366 DISPLAY_PHY_STATUS,
1367 PHY_POWERGOOD(phy),
1368 PHY_POWERGOOD(phy),
1369 1))
1370 DRM_ERROR("Display PHY %d is not power up\n", phy);
1371
1372 mutex_lock(&dev_priv->sb_lock);
1373
1374 /* Enable dynamic power down */
1375 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1376 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1377 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1378 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1379
1380 if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1381 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1382 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1383 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1384 } else {
1385 /*
1386 * Force the non-existing CL2 off. BXT does this
1387 * too, so maybe it saves some power even though
1388 * CL2 doesn't exist?
1389 */
1390 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1391 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1392 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1393 }
1394
1395 mutex_unlock(&dev_priv->sb_lock);
1396
1397 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1398 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1399
1400 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1401 phy, dev_priv->chv_phy_control);
1402
1403 assert_chv_phy_status(dev_priv);
1404 }
1405
1406 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1407 struct i915_power_well *power_well)
1408 {
1409 enum dpio_phy phy;
1410
1411 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1412 power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1413
1414 if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1415 phy = DPIO_PHY0;
1416 assert_pll_disabled(dev_priv, PIPE_A);
1417 assert_pll_disabled(dev_priv, PIPE_B);
1418 } else {
1419 phy = DPIO_PHY1;
1420 assert_pll_disabled(dev_priv, PIPE_C);
1421 }
1422
1423 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1424 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1425
1426 vlv_set_power_well(dev_priv, power_well, false);
1427
1428 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1429 phy, dev_priv->chv_phy_control);
1430
1431 /* PHY is fully reset now, so we can enable the PHY state asserts */
1432 dev_priv->chv_phy_assert[phy] = true;
1433
1434 assert_chv_phy_status(dev_priv);
1435 }
1436
1437 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1438 enum dpio_channel ch, bool override, unsigned int mask)
1439 {
1440 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1441 u32 reg, val, expected, actual;
1442
1443 /*
1444 * The BIOS can leave the PHY in some weird state
1445 * where it doesn't fully power down some parts.
1446 * Disable the asserts until the PHY has been fully
1447 * reset (ie. the power well has been disabled at
1448 * least once).
1449 */
1450 if (!dev_priv->chv_phy_assert[phy])
1451 return;
1452
1453 if (ch == DPIO_CH0)
1454 reg = _CHV_CMN_DW0_CH0;
1455 else
1456 reg = _CHV_CMN_DW6_CH1;
1457
1458 mutex_lock(&dev_priv->sb_lock);
1459 val = vlv_dpio_read(dev_priv, pipe, reg);
1460 mutex_unlock(&dev_priv->sb_lock);
1461
1462 /*
1463 * This assumes !override is only used when the port is disabled.
1464 * All lanes should power down even without the override when
1465 * the port is disabled.
1466 */
1467 if (!override || mask == 0xf) {
1468 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1469 /*
1470 * If CH1 common lane is not active anymore
1471 * (eg. for pipe B DPLL) the entire channel will
1472 * shut down, which causes the common lane registers
1473 * to read as 0. That means we can't actually check
1474 * the lane power down status bits, but as the entire
1475 * register reads as 0 it's a good indication that the
1476 * channel is indeed entirely powered down.
1477 */
1478 if (ch == DPIO_CH1 && val == 0)
1479 expected = 0;
1480 } else if (mask != 0x0) {
1481 expected = DPIO_ANYDL_POWERDOWN;
1482 } else {
1483 expected = 0;
1484 }
1485
1486 if (ch == DPIO_CH0)
1487 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1488 else
1489 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1490 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1491
1492 WARN(actual != expected,
1493 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1494 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1495 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1496 reg, val);
1497 }
1498
1499 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1500 enum dpio_channel ch, bool override)
1501 {
1502 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1503 bool was_override;
1504
1505 mutex_lock(&power_domains->lock);
1506
1507 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1508
1509 if (override == was_override)
1510 goto out;
1511
1512 if (override)
1513 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1514 else
1515 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1516
1517 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1518
1519 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1520 phy, ch, dev_priv->chv_phy_control);
1521
1522 assert_chv_phy_status(dev_priv);
1523
1524 out:
1525 mutex_unlock(&power_domains->lock);
1526
1527 return was_override;
1528 }
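
/*
 * Sketch of the intended calling pattern (illustrative): callers save the
 * previous override state and restore it afterwards, e.g. to power up the
 * lanes temporarily around an access on an otherwise idle port.
 */
#if 0
static void example_with_lane_override(struct drm_i915_private *dev_priv)
{
	bool was_override;

	was_override = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, true);
	/* ... access that needs the common lane powered up ... */
	chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, was_override);
}
#endif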
1529
1530 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1531 bool override, unsigned int mask)
1532 {
1533 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1534 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1535 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1536 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1537
1538 mutex_lock(&power_domains->lock);
1539
1540 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1541 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1542
1543 if (override)
1544 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1545 else
1546 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1547
1548 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1549
1550 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1551 phy, ch, mask, dev_priv->chv_phy_control);
1552
1553 assert_chv_phy_status(dev_priv);
1554
1555 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1556
1557 mutex_unlock(&power_domains->lock);
1558 }
1559
1560 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1561 struct i915_power_well *power_well)
1562 {
1563 enum pipe pipe = power_well->id;
1564 bool enabled;
1565 u32 state, ctrl;
1566
1567 mutex_lock(&dev_priv->rps.hw_lock);
1568
1569 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1570 /*
1571 * We only ever set the power-on and power-gate states, anything
1572 * else is unexpected.
1573 */
1574 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1575 enabled = state == DP_SSS_PWR_ON(pipe);
1576
1577 /*
1578 * A transient state at this point would mean some unexpected party
1579 * is poking at the power controls too.
1580 */
1581 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1582 WARN_ON(ctrl << 16 != state);
1583
1584 mutex_unlock(&dev_priv->rps.hw_lock);
1585
1586 return enabled;
1587 }
1588
1589 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1590 struct i915_power_well *power_well,
1591 bool enable)
1592 {
1593 enum pipe pipe = power_well->id;
1594 u32 state;
1595 u32 ctrl;
1596
1597 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1598
1599 mutex_lock(&dev_priv->rps.hw_lock);
1600
1601 #define COND \
1602 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1603
1604 if (COND)
1605 goto out;
1606
1607 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1608 ctrl &= ~DP_SSC_MASK(pipe);
1609 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1610 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1611
1612 if (wait_for(COND, 100))
1613 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1614 state,
1615 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1616
1617 #undef COND
1618
1619 out:
1620 mutex_unlock(&dev_priv->rps.hw_lock);
1621 }
1622
1623 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1624 struct i915_power_well *power_well)
1625 {
1626 WARN_ON_ONCE(power_well->id != PIPE_A);
1627
1628 chv_set_pipe_power_well(dev_priv, power_well, true);
1629
1630 vlv_display_power_well_init(dev_priv);
1631 }
1632
1633 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1634 struct i915_power_well *power_well)
1635 {
1636 WARN_ON_ONCE(power_well->id != PIPE_A);
1637
1638 vlv_display_power_well_deinit(dev_priv);
1639
1640 chv_set_pipe_power_well(dev_priv, power_well, false);
1641 }
1642
1643 static void
1644 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1645 enum intel_display_power_domain domain)
1646 {
1647 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1648 struct i915_power_well *power_well;
1649
1650 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1651 intel_power_well_get(dev_priv, power_well);
1652
1653 power_domains->domain_use_count[domain]++;
1654 }
1655
1656 /**
1657 * intel_display_power_get - grab a power domain reference
1658 * @dev_priv: i915 device instance
1659 * @domain: power domain to reference
1660 *
1661 * This function grabs a power domain reference for @domain and ensures that the
1662 * power domain and all its parents are powered up. Therefore users should only
1663 * grab a reference to the innermost power domain they need.
1664 *
1665 * Any power domain reference obtained by this function must have a symmetric
1666 * call to intel_display_power_put() to release the reference again.
1667 */
1668 void intel_display_power_get(struct drm_i915_private *dev_priv,
1669 enum intel_display_power_domain domain)
1670 {
1671 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1672
1673 intel_runtime_pm_get(dev_priv);
1674
1675 mutex_lock(&power_domains->lock);
1676
1677 __intel_display_power_get_domain(dev_priv, domain);
1678
1679 mutex_unlock(&power_domains->lock);
1680 }
1681
1682 /**
1683 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1684 * @dev_priv: i915 device instance
1685 * @domain: power domain to reference
1686 *
1687 * This function grabs a power domain reference for @domain if the power domain
1688 * is already enabled, and ensures that it stays enabled. Users should only
1689 * grab a reference to the innermost power domain they need.
1690 *
1691 * Returns true if the reference was taken. In that case it must be released
1692 * again with a symmetric call to intel_display_power_put().
1693 */
1694 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1695 enum intel_display_power_domain domain)
1696 {
1697 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1698 bool is_enabled;
1699
1700 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1701 return false;
1702
1703 mutex_lock(&power_domains->lock);
1704
1705 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1706 __intel_display_power_get_domain(dev_priv, domain);
1707 is_enabled = true;
1708 } else {
1709 is_enabled = false;
1710 }
1711
1712 mutex_unlock(&power_domains->lock);
1713
1714 if (!is_enabled)
1715 intel_runtime_pm_put(dev_priv);
1716
1717 return is_enabled;
1718 }
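
/*
 * Usage sketch (illustrative only): the conditional variant pairs with a
 * put only on success, which makes it safe in paths that must not wake up
 * a runtime suspended device.
 */
#if 0
static void example_maybe_touch_aux(struct drm_i915_private *dev_priv)
{
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUX_B))
		return;	/* domain was off; no reference taken, nothing to put */

	/* ... AUX B registers can be accessed here ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);
}
#endif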
1719
1720 /**
1721 * intel_display_power_put - release a power domain reference
1722 * @dev_priv: i915 device instance
1723  * @domain: power domain to release the reference for
1724 *
1725 * This function drops the power domain reference obtained by
1726 * intel_display_power_get() and might power down the corresponding hardware
1727 * block right away if this is the last reference.
1728 */
1729 void intel_display_power_put(struct drm_i915_private *dev_priv,
1730 enum intel_display_power_domain domain)
1731 {
1732 struct i915_power_domains *power_domains;
1733 struct i915_power_well *power_well;
1734
1735 power_domains = &dev_priv->power_domains;
1736
1737 mutex_lock(&power_domains->lock);
1738
1739 WARN(!power_domains->domain_use_count[domain],
1740 "Use count on domain %s is already zero\n",
1741 intel_display_power_domain_str(domain));
1742 power_domains->domain_use_count[domain]--;
1743
1744 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1745 intel_power_well_put(dev_priv, power_well);
1746
1747 mutex_unlock(&power_domains->lock);
1748
1749 intel_runtime_pm_put(dev_priv);
1750 }
1751
1752 #define HSW_DISPLAY_POWER_DOMAINS ( \
1753 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1754 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1755 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1756 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1757 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1758 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1759 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1760 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1761 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1762 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1763 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1764 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1765 BIT_ULL(POWER_DOMAIN_VGA) | \
1766 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1767 BIT_ULL(POWER_DOMAIN_INIT))
1768
1769 #define BDW_DISPLAY_POWER_DOMAINS ( \
1770 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1771 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1772 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1773 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1774 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1775 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1776 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1777 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1778 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1779 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1780 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1781 BIT_ULL(POWER_DOMAIN_VGA) | \
1782 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1783 BIT_ULL(POWER_DOMAIN_INIT))
1784
1785 #define VLV_DISPLAY_POWER_DOMAINS ( \
1786 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1787 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1788 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1789 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1790 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1791 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1792 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1793 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1794 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1795 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1796 BIT_ULL(POWER_DOMAIN_VGA) | \
1797 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1798 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1799 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1800 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1801 BIT_ULL(POWER_DOMAIN_INIT))
1802
1803 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
1804 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1805 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1806 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1807 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1808 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1809 BIT_ULL(POWER_DOMAIN_INIT))
1810
1811 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
1812 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1813 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1814 BIT_ULL(POWER_DOMAIN_INIT))
1815
1816 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
1817 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1818 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1819 BIT_ULL(POWER_DOMAIN_INIT))
1820
1821 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
1822 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1823 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1824 BIT_ULL(POWER_DOMAIN_INIT))
1825
1826 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
1827 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1828 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1829 BIT_ULL(POWER_DOMAIN_INIT))
1830
1831 #define CHV_DISPLAY_POWER_DOMAINS ( \
1832 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1833 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1834 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1835 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1836 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1837 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1838 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1839 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1840 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1841 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1842 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1843 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1844 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1845 BIT_ULL(POWER_DOMAIN_VGA) | \
1846 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1847 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1848 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1849 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1850 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1851 BIT_ULL(POWER_DOMAIN_INIT))
1852
1853 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
1854 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1855 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1856 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1857 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1858 BIT_ULL(POWER_DOMAIN_INIT))
1859
1860 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
1861 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1862 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1863 BIT_ULL(POWER_DOMAIN_INIT))
1864
1865 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1866 .sync_hw = i9xx_power_well_sync_hw_noop,
1867 .enable = i9xx_always_on_power_well_noop,
1868 .disable = i9xx_always_on_power_well_noop,
1869 .is_enabled = i9xx_always_on_power_well_enabled,
1870 };
1871
1872 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1873 .sync_hw = i9xx_power_well_sync_hw_noop,
1874 .enable = chv_pipe_power_well_enable,
1875 .disable = chv_pipe_power_well_disable,
1876 .is_enabled = chv_pipe_power_well_enabled,
1877 };
1878
1879 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1880 .sync_hw = i9xx_power_well_sync_hw_noop,
1881 .enable = chv_dpio_cmn_power_well_enable,
1882 .disable = chv_dpio_cmn_power_well_disable,
1883 .is_enabled = vlv_power_well_enabled,
1884 };
1885
1886 static struct i915_power_well i9xx_always_on_power_well[] = {
1887 {
1888 .name = "always-on",
1889 .always_on = 1,
1890 .domains = POWER_DOMAIN_MASK,
1891 .ops = &i9xx_always_on_power_well_ops,
1892 },
1893 };
1894
1895 static const struct i915_power_well_ops hsw_power_well_ops = {
1896 .sync_hw = hsw_power_well_sync_hw,
1897 .enable = hsw_power_well_enable,
1898 .disable = hsw_power_well_disable,
1899 .is_enabled = hsw_power_well_enabled,
1900 };
1901
1902 static const struct i915_power_well_ops skl_power_well_ops = {
1903 .sync_hw = skl_power_well_sync_hw,
1904 .enable = skl_power_well_enable,
1905 .disable = skl_power_well_disable,
1906 .is_enabled = skl_power_well_enabled,
1907 };
1908
1909 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1910 .sync_hw = i9xx_power_well_sync_hw_noop,
1911 .enable = gen9_dc_off_power_well_enable,
1912 .disable = gen9_dc_off_power_well_disable,
1913 .is_enabled = gen9_dc_off_power_well_enabled,
1914 };
1915
1916 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1917 .sync_hw = i9xx_power_well_sync_hw_noop,
1918 .enable = bxt_dpio_cmn_power_well_enable,
1919 .disable = bxt_dpio_cmn_power_well_disable,
1920 .is_enabled = bxt_dpio_cmn_power_well_enabled,
1921 };
1922
1923 static struct i915_power_well hsw_power_wells[] = {
1924 {
1925 .name = "always-on",
1926 .always_on = 1,
1927 .domains = POWER_DOMAIN_MASK,
1928 .ops = &i9xx_always_on_power_well_ops,
1929 },
1930 {
1931 .name = "display",
1932 .domains = HSW_DISPLAY_POWER_DOMAINS,
1933 .ops = &hsw_power_well_ops,
1934 },
1935 };
1936
1937 static struct i915_power_well bdw_power_wells[] = {
1938 {
1939 .name = "always-on",
1940 .always_on = 1,
1941 .domains = POWER_DOMAIN_MASK,
1942 .ops = &i9xx_always_on_power_well_ops,
1943 },
1944 {
1945 .name = "display",
1946 .domains = BDW_DISPLAY_POWER_DOMAINS,
1947 .ops = &hsw_power_well_ops,
1948 },
1949 };
1950
1951 static const struct i915_power_well_ops vlv_display_power_well_ops = {
1952 .sync_hw = i9xx_power_well_sync_hw_noop,
1953 .enable = vlv_display_power_well_enable,
1954 .disable = vlv_display_power_well_disable,
1955 .is_enabled = vlv_power_well_enabled,
1956 };
1957
1958 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
1959 .sync_hw = i9xx_power_well_sync_hw_noop,
1960 .enable = vlv_dpio_cmn_power_well_enable,
1961 .disable = vlv_dpio_cmn_power_well_disable,
1962 .is_enabled = vlv_power_well_enabled,
1963 };
1964
1965 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
1966 .sync_hw = i9xx_power_well_sync_hw_noop,
1967 .enable = vlv_power_well_enable,
1968 .disable = vlv_power_well_disable,
1969 .is_enabled = vlv_power_well_enabled,
1970 };
1971
1972 static struct i915_power_well vlv_power_wells[] = {
1973 {
1974 .name = "always-on",
1975 .always_on = 1,
1976 .domains = POWER_DOMAIN_MASK,
1977 .ops = &i9xx_always_on_power_well_ops,
1978 .id = PUNIT_POWER_WELL_ALWAYS_ON,
1979 },
1980 {
1981 .name = "display",
1982 .domains = VLV_DISPLAY_POWER_DOMAINS,
1983 .id = PUNIT_POWER_WELL_DISP2D,
1984 .ops = &vlv_display_power_well_ops,
1985 },
1986 {
1987 .name = "dpio-tx-b-01",
1988 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1989 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1990 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1991 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1992 .ops = &vlv_dpio_power_well_ops,
1993 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1994 },
1995 {
1996 .name = "dpio-tx-b-23",
1997 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1998 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1999 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2000 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2001 .ops = &vlv_dpio_power_well_ops,
2002 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
2003 },
2004 {
2005 .name = "dpio-tx-c-01",
2006 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2007 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2008 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2009 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2010 .ops = &vlv_dpio_power_well_ops,
2011 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2012 },
2013 {
2014 .name = "dpio-tx-c-23",
2015 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2016 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2017 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2018 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2019 .ops = &vlv_dpio_power_well_ops,
2020 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2021 },
2022 {
2023 .name = "dpio-common",
2024 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2025 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2026 .ops = &vlv_dpio_cmn_power_well_ops,
2027 },
2028 };
2029
2030 static struct i915_power_well chv_power_wells[] = {
2031 {
2032 .name = "always-on",
2033 .always_on = 1,
2034 .domains = POWER_DOMAIN_MASK,
2035 .ops = &i9xx_always_on_power_well_ops,
2036 },
2037 {
2038 .name = "display",
2039 /*
2040 * Pipe A power well is the new disp2d well. Pipe B and C
2041 * power wells don't actually exist. Pipe A power well is
2042 * required for any pipe to work.
2043 */
2044 .domains = CHV_DISPLAY_POWER_DOMAINS,
2045 .id = PIPE_A,
2046 .ops = &chv_pipe_power_well_ops,
2047 },
2048 {
2049 .name = "dpio-common-bc",
2050 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2051 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2052 .ops = &chv_dpio_cmn_power_well_ops,
2053 },
2054 {
2055 .name = "dpio-common-d",
2056 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2057 .id = PUNIT_POWER_WELL_DPIO_CMN_D,
2058 .ops = &chv_dpio_cmn_power_well_ops,
2059 },
2060 };
2061
2062 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2063 int power_well_id)
2064 {
2065 struct i915_power_well *power_well;
2066 bool ret;
2067
2068 power_well = lookup_power_well(dev_priv, power_well_id);
2069 ret = power_well->ops->is_enabled(dev_priv, power_well);
2070
2071 return ret;
2072 }
2073
2074 static struct i915_power_well skl_power_wells[] = {
2075 {
2076 .name = "always-on",
2077 .always_on = 1,
2078 .domains = POWER_DOMAIN_MASK,
2079 .ops = &i9xx_always_on_power_well_ops,
2080 .id = SKL_DISP_PW_ALWAYS_ON,
2081 },
2082 {
2083 .name = "power well 1",
2084 /* Handled by the DMC firmware */
2085 .domains = 0,
2086 .ops = &skl_power_well_ops,
2087 .id = SKL_DISP_PW_1,
2088 },
2089 {
2090 .name = "MISC IO power well",
2091 /* Handled by the DMC firmware */
2092 .domains = 0,
2093 .ops = &skl_power_well_ops,
2094 .id = SKL_DISP_PW_MISC_IO,
2095 },
2096 {
2097 .name = "DC off",
2098 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2099 .ops = &gen9_dc_off_power_well_ops,
2100 .id = SKL_DISP_PW_DC_OFF,
2101 },
2102 {
2103 .name = "power well 2",
2104 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2105 .ops = &skl_power_well_ops,
2106 .id = SKL_DISP_PW_2,
2107 },
2108 {
2109 .name = "DDI A/E power well",
2110 .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
2111 .ops = &skl_power_well_ops,
2112 .id = SKL_DISP_PW_DDI_A_E,
2113 },
2114 {
2115 .name = "DDI B power well",
2116 .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
2117 .ops = &skl_power_well_ops,
2118 .id = SKL_DISP_PW_DDI_B,
2119 },
2120 {
2121 .name = "DDI C power well",
2122 .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
2123 .ops = &skl_power_well_ops,
2124 .id = SKL_DISP_PW_DDI_C,
2125 },
2126 {
2127 .name = "DDI D power well",
2128 .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
2129 .ops = &skl_power_well_ops,
2130 .id = SKL_DISP_PW_DDI_D,
2131 },
2132 };
2133
2134 static struct i915_power_well bxt_power_wells[] = {
2135 {
2136 .name = "always-on",
2137 .always_on = 1,
2138 .domains = POWER_DOMAIN_MASK,
2139 .ops = &i9xx_always_on_power_well_ops,
2140 },
2141 {
2142 .name = "power well 1",
2143 .domains = 0,
2144 .ops = &skl_power_well_ops,
2145 .id = SKL_DISP_PW_1,
2146 },
2147 {
2148 .name = "DC off",
2149 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2150 .ops = &gen9_dc_off_power_well_ops,
2151 .id = SKL_DISP_PW_DC_OFF,
2152 },
2153 {
2154 .name = "power well 2",
2155 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2156 .ops = &skl_power_well_ops,
2157 .id = SKL_DISP_PW_2,
2158 },
2159 {
2160 .name = "dpio-common-a",
2161 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2162 .ops = &bxt_dpio_cmn_power_well_ops,
2163 .id = BXT_DPIO_CMN_A,
2164 .data = DPIO_PHY1,
2165 },
2166 {
2167 .name = "dpio-common-bc",
2168 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2169 .ops = &bxt_dpio_cmn_power_well_ops,
2170 .id = BXT_DPIO_CMN_BC,
2171 .data = DPIO_PHY0,
2172 },
2173 };
2174
2175 static struct i915_power_well glk_power_wells[] = {
2176 {
2177 .name = "always-on",
2178 .always_on = 1,
2179 .domains = POWER_DOMAIN_MASK,
2180 .ops = &i9xx_always_on_power_well_ops,
2181 },
2182 {
2183 .name = "power well 1",
2184 /* Handled by the DMC firmware */
2185 .domains = 0,
2186 .ops = &skl_power_well_ops,
2187 .id = SKL_DISP_PW_1,
2188 },
2189 {
2190 .name = "DC off",
2191 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2192 .ops = &gen9_dc_off_power_well_ops,
2193 .id = SKL_DISP_PW_DC_OFF,
2194 },
2195 {
2196 .name = "power well 2",
2197 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2198 .ops = &skl_power_well_ops,
2199 .id = SKL_DISP_PW_2,
2200 },
2201 {
2202 .name = "dpio-common-a",
2203 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2204 .ops = &bxt_dpio_cmn_power_well_ops,
2205 .id = BXT_DPIO_CMN_A,
2206 .data = DPIO_PHY1,
2207 },
2208 {
2209 .name = "dpio-common-b",
2210 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2211 .ops = &bxt_dpio_cmn_power_well_ops,
2212 .id = BXT_DPIO_CMN_BC,
2213 .data = DPIO_PHY0,
2214 },
2215 {
2216 .name = "dpio-common-c",
2217 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2218 .ops = &bxt_dpio_cmn_power_well_ops,
2219 .id = GLK_DPIO_CMN_C,
2220 .data = DPIO_PHY2,
2221 },
2222 {
2223 .name = "AUX A",
2224 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2225 .ops = &skl_power_well_ops,
2226 .id = GLK_DISP_PW_AUX_A,
2227 },
2228 {
2229 .name = "AUX B",
2230 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2231 .ops = &skl_power_well_ops,
2232 .id = GLK_DISP_PW_AUX_B,
2233 },
2234 {
2235 .name = "AUX C",
2236 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2237 .ops = &skl_power_well_ops,
2238 .id = GLK_DISP_PW_AUX_C,
2239 },
2240 {
2241 .name = "DDI A power well",
2242 .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
2243 .ops = &skl_power_well_ops,
2244 .id = GLK_DISP_PW_DDI_A,
2245 },
2246 {
2247 .name = "DDI B power well",
2248 .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
2249 .ops = &skl_power_well_ops,
2250 .id = SKL_DISP_PW_DDI_B,
2251 },
2252 {
2253 .name = "DDI C power well",
2254 .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
2255 .ops = &skl_power_well_ops,
2256 .id = SKL_DISP_PW_DDI_C,
2257 },
2258 };
2259
2260 static int
2261 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2262 int disable_power_well)
2263 {
2264 if (disable_power_well >= 0)
2265 return !!disable_power_well;
2266
2267 return 1;
2268 }
2269
2270 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2271 int enable_dc)
2272 {
2273 uint32_t mask;
2274 int requested_dc;
2275 int max_dc;
2276
2277 if (IS_GEN9_BC(dev_priv)) {
2278 max_dc = 2;
2279 mask = 0;
2280 } else if (IS_GEN9_LP(dev_priv)) {
2281 max_dc = 1;
2282 /*
2283 * DC9 has a separate HW flow from the rest of the DC states,
2284 * not depending on the DMC firmware. It's needed by system
2285 * suspend/resume, so allow it unconditionally.
2286 */
2287 mask = DC_STATE_EN_DC9;
2288 } else {
2289 max_dc = 0;
2290 mask = 0;
2291 }
2292
2293 if (!i915.disable_power_well)
2294 max_dc = 0;
2295
2296 if (enable_dc >= 0 && enable_dc <= max_dc) {
2297 requested_dc = enable_dc;
2298 } else if (enable_dc == -1) {
2299 requested_dc = max_dc;
2300 } else if (enable_dc > max_dc && enable_dc <= 2) {
2301 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2302 enable_dc, max_dc);
2303 requested_dc = max_dc;
2304 } else {
2305 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2306 requested_dc = max_dc;
2307 }
2308
2309 if (requested_dc > 1)
2310 mask |= DC_STATE_EN_UPTO_DC6;
2311 if (requested_dc > 0)
2312 mask |= DC_STATE_EN_UPTO_DC5;
2313
2314 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2315
2316 return mask;
2317 }
2318
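/*
 * Worked example, assuming the default module parameters (in
 * particular i915.disable_power_well left enabled): on a GEN9_BC
 * platform max_dc is 2, so the default enable_dc=-1 yields
 * requested_dc=2 and a mask of
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On GEN9_LP max_dc is 1
 * and DC_STATE_EN_DC9 is always included, so the default mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5.
 */
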
2319 #define set_power_wells(power_domains, __power_wells) ({ \
2320 (power_domains)->power_wells = (__power_wells); \
2321 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
2322 })
2323
2324 /**
2325 * intel_power_domains_init - initializes the power domain structures
2326 * @dev_priv: i915 device instance
2327 *
2328 * Initializes the power domain structures for @dev_priv depending upon the
2329 * supported platform.
2330 */
2331 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2332 {
2333 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2334
2335 i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2336 i915.disable_power_well);
2337 dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2338 i915.enable_dc);
2339
2340 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2341
2342 mutex_init(&power_domains->lock);
2343
2344 /*
2345 * The enabling order will be from lower to higher indexed wells,
2346 * the disabling order is reversed.
2347 */
2348 if (IS_HASWELL(dev_priv)) {
2349 set_power_wells(power_domains, hsw_power_wells);
2350 } else if (IS_BROADWELL(dev_priv)) {
2351 set_power_wells(power_domains, bdw_power_wells);
2352 } else if (IS_GEN9_BC(dev_priv)) {
2353 set_power_wells(power_domains, skl_power_wells);
2354 } else if (IS_BROXTON(dev_priv)) {
2355 set_power_wells(power_domains, bxt_power_wells);
2356 } else if (IS_GEMINILAKE(dev_priv)) {
2357 set_power_wells(power_domains, glk_power_wells);
2358 } else if (IS_CHERRYVIEW(dev_priv)) {
2359 set_power_wells(power_domains, chv_power_wells);
2360 } else if (IS_VALLEYVIEW(dev_priv)) {
2361 set_power_wells(power_domains, vlv_power_wells);
2362 } else {
2363 set_power_wells(power_domains, i9xx_always_on_power_well);
2364 }
2365
2366 return 0;
2367 }
2368
2369 /**
2370 * intel_power_domains_fini - finalizes the power domain structures
2371 * @dev_priv: i915 device instance
2372 *
2373 * Finalizes the power domain structures for @dev_priv depending upon the
2374 * supported platform. This function also disables runtime pm and ensures that
2375 * the device stays powered up so that the driver can be reloaded.
2376 */
2377 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2378 {
2379 struct device *kdev = &dev_priv->drm.pdev->dev;
2380
2381 /*
2382 * The i915.ko module is still not prepared to be loaded when
2383 * the power well is not enabled, so just enable it in case
2384 * we're going to unload/reload.
2385 * The following also reacquires the RPM reference the core passed
2386 * to the driver during loading, which is dropped in
2387 * intel_runtime_pm_enable(). We have to hand back the control of the
2388 * device to the core with this reference held.
2389 */
2390 intel_display_set_init_power(dev_priv, true);
2391
2392 /* Remove the refcount we took to keep power well support disabled. */
2393 if (!i915.disable_power_well)
2394 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2395
2396 /*
2397 * Remove the refcount we took in intel_runtime_pm_enable() in case
2398 * the platform doesn't support runtime PM.
2399 */
2400 if (!HAS_RUNTIME_PM(dev_priv))
2401 pm_runtime_put(kdev);
2402 }
2403
2404 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2405 {
2406 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2407 struct i915_power_well *power_well;
2408
2409 mutex_lock(&power_domains->lock);
2410 for_each_power_well(dev_priv, power_well) {
2411 power_well->ops->sync_hw(dev_priv, power_well);
2412 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2413 power_well);
2414 }
2415 mutex_unlock(&power_domains->lock);
2416 }
2417
2418 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2419 {
2420 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2421 POSTING_READ(DBUF_CTL);
2422
2423 udelay(10);
2424
2425 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2426 DRM_ERROR("DBuf power enable timeout\n");
2427 }
2428
2429 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2430 {
2431 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2432 POSTING_READ(DBUF_CTL);
2433
2434 udelay(10);
2435
2436 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2437 DRM_ERROR("DBuf power disable timeout!\n");
2438 }
2439
2440 static void skl_display_core_init(struct drm_i915_private *dev_priv,
2441 bool resume)
2442 {
2443 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2444 struct i915_power_well *well;
2445 uint32_t val;
2446
2447 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2448
2449 /* enable PCH reset handshake */
2450 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2451 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2452
2453 /* enable PG1 and Misc I/O */
2454 mutex_lock(&power_domains->lock);
2455
2456 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2457 intel_power_well_enable(dev_priv, well);
2458
2459 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2460 intel_power_well_enable(dev_priv, well);
2461
2462 mutex_unlock(&power_domains->lock);
2463
2464 skl_init_cdclk(dev_priv);
2465
2466 gen9_dbuf_enable(dev_priv);
2467
2468 if (resume && dev_priv->csr.dmc_payload)
2469 intel_csr_load_program(dev_priv);
2470 }
2471
2472 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2473 {
2474 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2475 struct i915_power_well *well;
2476
2477 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2478
2479 gen9_dbuf_disable(dev_priv);
2480
2481 skl_uninit_cdclk(dev_priv);
2482
2483 /* The spec doesn't call for removing the reset handshake flag */
2484 /* disable PG1 and Misc I/O */
2485
2486 mutex_lock(&power_domains->lock);
2487
2488 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2489 intel_power_well_disable(dev_priv, well);
2490
2491 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2492 intel_power_well_disable(dev_priv, well);
2493
2494 mutex_unlock(&power_domains->lock);
2495 }
2496
2497 void bxt_display_core_init(struct drm_i915_private *dev_priv,
2498 bool resume)
2499 {
2500 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2501 struct i915_power_well *well;
2502 uint32_t val;
2503
2504 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2505
2506 /*
2507 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2508 * or else the reset will hang because there is no PCH to respond.
2509 * Move the handshake programming to the initialization sequence;
2510 * previously it was left up to the BIOS.
2511 */
2512 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2513 val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2514 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2515
2516 /* Enable PG1 */
2517 mutex_lock(&power_domains->lock);
2518
2519 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2520 intel_power_well_enable(dev_priv, well);
2521
2522 mutex_unlock(&power_domains->lock);
2523
2524 bxt_init_cdclk(dev_priv);
2525
2526 gen9_dbuf_enable(dev_priv);
2527
2528 if (resume && dev_priv->csr.dmc_payload)
2529 intel_csr_load_program(dev_priv);
2530 }
2531
2532 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2533 {
2534 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2535 struct i915_power_well *well;
2536
2537 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2538
2539 gen9_dbuf_disable(dev_priv);
2540
2541 bxt_uninit_cdclk(dev_priv);
2542
2543 /* The spec doesn't call for removing the reset handshake flag */
2544
2545 /* Disable PG1 */
2546 mutex_lock(&power_domains->lock);
2547
2548 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2549 intel_power_well_disable(dev_priv, well);
2550
2551 mutex_unlock(&power_domains->lock);
2552 }
2553
2554 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2555 {
2556 struct i915_power_well *cmn_bc =
2557 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2558 struct i915_power_well *cmn_d =
2559 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2560
2561 /*
2562 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2563 * workaround never ever read DISPLAY_PHY_CONTROL, and
2564 * instead maintain a shadow copy ourselves. Use the actual
2565 * power well state and lane status to reconstruct the
2566 * expected initial value.
2567 */
2568 dev_priv->chv_phy_control =
2569 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2570 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
2571 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2572 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2573 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2574
2575 /*
2576 * If all lanes are disabled we leave the override disabled
2577 * with all power down bits cleared to match the state we
2578 * would use after disabling the port. Otherwise enable the
2579 * override and set the lane powerdown bits according to the
2580 * current lane status.
2581 */
2582 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2583 uint32_t status = I915_READ(DPLL(PIPE_A));
2584 unsigned int mask;
2585
2586 mask = status & DPLL_PORTB_READY_MASK;
2587 if (mask == 0xf)
2588 mask = 0x0;
2589 else
2590 dev_priv->chv_phy_control |=
2591 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2592
2593 dev_priv->chv_phy_control |=
2594 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2595
2596 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2597 if (mask == 0xf)
2598 mask = 0x0;
2599 else
2600 dev_priv->chv_phy_control |=
2601 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2602
2603 dev_priv->chv_phy_control |=
2604 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2605
2606 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2607
2608 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2609 } else {
2610 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2611 }
2612
2613 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2614 uint32_t status = I915_READ(DPIO_PHY_STATUS);
2615 unsigned int mask;
2616
2617 mask = status & DPLL_PORTD_READY_MASK;
2618
2619 if (mask == 0xf)
2620 mask = 0x0;
2621 else
2622 dev_priv->chv_phy_control |=
2623 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2624
2625 dev_priv->chv_phy_control |=
2626 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2627
2628 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2629
2630 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2631 } else {
2632 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2633 }
2634
2635 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2636
2637 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2638 dev_priv->chv_phy_control);
2639 }
2640
2641 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2642 {
2643 struct i915_power_well *cmn =
2644 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2645 struct i915_power_well *disp2d =
2646 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2647
2648 /* If the display might already be active, skip this */
2649 if (cmn->ops->is_enabled(dev_priv, cmn) &&
2650 disp2d->ops->is_enabled(dev_priv, disp2d) &&
2651 I915_READ(DPIO_CTL) & DPIO_CMNRST)
2652 return;
2653
2654 DRM_DEBUG_KMS("toggling display PHY side reset\n");
2655
2656 /* cmnlane needs DPLL registers */
2657 disp2d->ops->enable(dev_priv, disp2d);
2658
2659 /*
2660 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2661 * Need to assert and de-assert PHY SB reset by gating the
2662 * common lane power, then un-gating it.
2663 * Simply ungating isn't enough to reset the PHY enough to get
2664 * ports and lanes running.
2665 */
2666 cmn->ops->disable(dev_priv, cmn);
2667 }
2668
2669 /**
2670 * intel_power_domains_init_hw - initialize hardware power domain state
2671 * @dev_priv: i915 device instance
2672 * @resume: true when called from a resume code path
2673 *
2674 * This function initializes the hardware power domain state and enables all
2675 * power domains using intel_display_set_init_power().
2676 */
2677 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2678 {
2679 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2680
2681 power_domains->initializing = true;
2682
2683 if (IS_GEN9_BC(dev_priv)) {
2684 skl_display_core_init(dev_priv, resume);
2685 } else if (IS_GEN9_LP(dev_priv)) {
2686 bxt_display_core_init(dev_priv, resume);
2687 } else if (IS_CHERRYVIEW(dev_priv)) {
2688 mutex_lock(&power_domains->lock);
2689 chv_phy_control_init(dev_priv);
2690 mutex_unlock(&power_domains->lock);
2691 } else if (IS_VALLEYVIEW(dev_priv)) {
2692 mutex_lock(&power_domains->lock);
2693 vlv_cmnlane_wa(dev_priv);
2694 mutex_unlock(&power_domains->lock);
2695 }
2696
2697 /* For now, we need the power well to be always enabled. */
2698 intel_display_set_init_power(dev_priv, true);
2699 /* Disable power well support if the user asked for it. */
2700 if (!i915.disable_power_well)
2701 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2702 intel_power_domains_sync_hw(dev_priv);
2703 power_domains->initializing = false;
2704 }
2705
2706 /**
2707 * intel_power_domains_suspend - suspend power domain state
2708 * @dev_priv: i915 device instance
2709 *
2710 * This function prepares the hardware power domain state before entering
2711 * system suspend. It must be paired with intel_power_domains_init_hw().
2712 */
2713 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2714 {
2715 /*
2716 * Even if power well support was disabled, we still want to disable
2717 * power wells while we are system suspended.
2718 */
2719 if (!i915.disable_power_well)
2720 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2721
2722 if (IS_GEN9_BC(dev_priv))
2723 skl_display_core_uninit(dev_priv);
2724 else if (IS_GEN9_LP(dev_priv))
2725 bxt_display_core_uninit(dev_priv);
2726 }
2727
2728 /**
2729 * intel_runtime_pm_get - grab a runtime pm reference
2730 * @dev_priv: i915 device instance
2731 *
2732 * This function grabs a device-level runtime pm reference (mostly used for GEM
2733 * code to ensure the GTT or GT is on) and ensures that it is powered up.
2734 *
2735 * Any runtime pm reference obtained by this function must have a symmetric
2736 * call to intel_runtime_pm_put() to release the reference again.
2737 */
2738 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2739 {
2740 struct pci_dev *pdev = dev_priv->drm.pdev;
2741 struct device *kdev = &pdev->dev;
2742
2743 pm_runtime_get_sync(kdev);
2744
2745 atomic_inc(&dev_priv->pm.wakeref_count);
2746 assert_rpm_wakelock_held(dev_priv);
2747 }
2748
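/*
 * Illustrative usage sketch, not part of the driver: code that needs
 * the device awake for a hardware access takes a runtime pm reference
 * around it; the put may let the device autosuspend again later:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... touch hardware ...
 *	intel_runtime_pm_put(dev_priv);
 */
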
2749 /**
2750 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2751 * @dev_priv: i915 device instance
2752 *
2753 * This function grabs a device-level runtime pm reference if the device is
2754 * already in use and ensures that it is powered up.
2755 *
2756 * Any runtime pm reference obtained by this function must have a symmetric
2757 * call to intel_runtime_pm_put() to release the reference again.
2758 */
2759 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2760 {
2761 struct pci_dev *pdev = dev_priv->drm.pdev;
2762 struct device *kdev = &pdev->dev;
2763
2764 if (IS_ENABLED(CONFIG_PM)) {
2765 int ret = pm_runtime_get_if_in_use(kdev);
2766
2767 /*
2768 * In cases where runtime PM is disabled by the RPM core and we
2769 * get an -EINVAL return value, we are not supposed to call this
2770 * function, since the power state is undefined. Currently this
2771 * applies to the late/early system suspend/resume handlers.
2772 */
2773 WARN_ON_ONCE(ret < 0);
2774 if (ret <= 0)
2775 return false;
2776 }
2777
2778 atomic_inc(&dev_priv->pm.wakeref_count);
2779 assert_rpm_wakelock_held(dev_priv);
2780
2781 return true;
2782 }
2783
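/*
 * Illustrative usage sketch, not part of the driver: as with
 * intel_display_power_get_if_enabled(), a put is only allowed after a
 * successful conditional get:
 *
 *	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
 *		... opportunistic work while the device is awake ...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */
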
2784 /**
2785 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2786 * @dev_priv: i915 device instance
2787 *
2788 * This function grabs a device-level runtime pm reference (mostly used for GEM
2789 * code to ensure the GTT or GT is on).
2790 *
2791 * It will _not_ power up the device but instead only check that it's powered
2792 * on. Therefore it is only valid to call this function from contexts where
2793 * the device is known to be powered up and where trying to power it up would
2794 * result in hilarity and deadlocks. That pretty much means only the system
2795 * suspend/resume code where this is used to grab runtime pm references for
2796 * delayed setup down in work items.
2797 *
2798 * Any runtime pm reference obtained by this function must have a symmetric
2799 * call to intel_runtime_pm_put() to release the reference again.
2800 */
2801 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2802 {
2803 struct pci_dev *pdev = dev_priv->drm.pdev;
2804 struct device *kdev = &pdev->dev;
2805
2806 assert_rpm_wakelock_held(dev_priv);
2807 pm_runtime_get_noresume(kdev);
2808
2809 atomic_inc(&dev_priv->pm.wakeref_count);
2810 }
2811
2812 /**
2813 * intel_runtime_pm_put - release a runtime pm reference
2814 * @dev_priv: i915 device instance
2815 *
2816 * This function drops the device-level runtime pm reference obtained by
2817 * intel_runtime_pm_get() and might power down the corresponding
2818 * hardware block right away if this is the last reference.
2819 */
2820 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2821 {
2822 struct pci_dev *pdev = dev_priv->drm.pdev;
2823 struct device *kdev = &pdev->dev;
2824
2825 assert_rpm_wakelock_held(dev_priv);
2826 atomic_dec(&dev_priv->pm.wakeref_count);
2827
2828 pm_runtime_mark_last_busy(kdev);
2829 pm_runtime_put_autosuspend(kdev);
2830 }
2831
2832 /**
2833 * intel_runtime_pm_enable - enable runtime pm
2834 * @dev_priv: i915 device instance
2835 *
2836 * This function enables runtime pm at the end of the driver load sequence.
2837 *
2838 * Note that this function does not currently enable runtime pm for the
2839 * subordinate display power domains. That is only done on the first modeset
2840 * using intel_display_set_init_power().
2841 */
2842 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2843 {
2844 struct pci_dev *pdev = dev_priv->drm.pdev;
2845 struct device *kdev = &pdev->dev;
2846
2847 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
2848 pm_runtime_mark_last_busy(kdev);
2849
2850 /*
2851 * Take a permanent reference to disable the RPM functionality and drop
2852 * it only when unloading the driver. Use the low level get/put helpers,
2853 * so the driver's own RPM reference tracking asserts also work on
2854 * platforms without RPM support.
2855 */
2856 if (!HAS_RUNTIME_PM(dev_priv)) {
2857 pm_runtime_dont_use_autosuspend(kdev);
2858 pm_runtime_get_sync(kdev);
2859 } else {
2860 pm_runtime_use_autosuspend(kdev);
2861 }
2862
2863 /*
2864 * The core calls the driver load handler with an RPM reference held.
2865 * We drop that here and will reacquire it during unloading in
2866 * intel_power_domains_fini().
2867 */
2868 pm_runtime_put_autosuspend(kdev);
2869 }