/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
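
/*
 * Illustrative sketch (not code from this file): a caller that needs the
 * AUX A channel powered up would bracket its register access with a
 * get/put pair on the corresponding abstract domain:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... access the AUX registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A);
 *
 * Which concrete power well(s) that maps to, and hence which registers get
 * poked, depends on the platform tables defined in this file.
 */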

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

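/*
 * Power wells are reference counted: the first get turns the well on, the
 * last put turns it off again. Callers normally go through the power domain
 * API further below instead of using these helpers directly.
 */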
static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled, runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled (BIOS, DRIVER, KVMR and DEBUG), and it will only be disabled
 * if none of the registers is requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->id == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->id == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

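/*
 * Each power well is associated with a 64-bit mask of the abstract power
 * domains it feeds; the per-platform masks below define that mapping. A
 * domain may be backed by more than one well, in which case all of them
 * must be enabled for the domain to count as powered (see
 * __intel_display_power_is_enabled() above).
 */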
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

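/*
 * DC5/DC6/DC9 are display power saving states controlled through
 * DC_STATE_EN: DC5/DC6 are entered by the DMC firmware once allowed, while
 * DC9 is requested directly by the driver on gen9 LP parts. The asserts
 * below sanity check the documented preconditions for each transition.
 */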
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks enough times and also force a rewrite until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

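/*
 * gen9_set_dc_state() only touches the DC bits that exist on this platform
 * (see gen9_dc_mask()) and tracks the last value written in csr.dc_state,
 * so that DMC interference can be detected on the next transition.
 */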
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    (IS_GEN9_BC(dev_priv) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->id) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_MISC_IO:
	case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
	case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
	case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
	case CNL_DISP_PW_AUX_D:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Power well status enabled without a request; invalid unless done by the BIOS\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* Take over the request bit if set by BIOS. */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
		if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
		      HSW_PWR_WELL_ENABLE_REQUEST))
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
	}
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
			SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
	uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);

		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
	}
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv, power_well->data);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

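/*
 * VLV/CHV power wells are toggled through the Punit mailbox rather than by
 * MMIO: the requested state is written to PUNIT_REG_PWRGT_CTRL and then
 * PUNIT_REG_PWRGT_STATUS is polled until the firmware acks the transition.
 */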
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->id == power_well_id)
			return power_well;
	}

	return NULL;
}

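/* Evaluates to true iff all bits in @bits are set in @val. */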
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d did not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

1608 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1609 enum dpio_channel ch, bool override)
1610 {
1611 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1612 bool was_override;
1613
1614 mutex_lock(&power_domains->lock);
1615
1616 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1617
1618 if (override == was_override)
1619 goto out;
1620
1621 if (override)
1622 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1623 else
1624 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1625
1626 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1627
1628 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1629 phy, ch, dev_priv->chv_phy_control);
1630
1631 assert_chv_phy_status(dev_priv);
1632
1633 out:
1634 mutex_unlock(&power_domains->lock);
1635
1636 return was_override;
1637 }
1638
1639 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1640 bool override, unsigned int mask)
1641 {
1642 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1643 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1644 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1645 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1646
1647 mutex_lock(&power_domains->lock);
1648
1649 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1650 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1651
1652 if (override)
1653 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1654 else
1655 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1656
1657 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1658
1659 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1660 phy, ch, mask, dev_priv->chv_phy_control);
1661
1662 assert_chv_phy_status(dev_priv);
1663
1664 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1665
1666 mutex_unlock(&power_domains->lock);
1667 }
1668
1669 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1670 struct i915_power_well *power_well)
1671 {
1672 enum pipe pipe = power_well->id;
1673 bool enabled;
1674 u32 state, ctrl;
1675
1676 mutex_lock(&dev_priv->rps.hw_lock);
1677
1678 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1679 /*
1680 * We only ever set the power-on and power-gate states, anything
1681 * else is unexpected.
1682 */
1683 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1684 enabled = state == DP_SSS_PWR_ON(pipe);
1685
1686 /*
1687 * A transient state at this point would mean some unexpected party
1688 * is poking at the power controls too.
1689 */
1690 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1691 WARN_ON(ctrl << 16 != state);
1692
1693 mutex_unlock(&dev_priv->rps.hw_lock);
1694
1695 return enabled;
1696 }
1697
1698 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1699 struct i915_power_well *power_well,
1700 bool enable)
1701 {
1702 enum pipe pipe = power_well->id;
1703 u32 state;
1704 u32 ctrl;
1705
1706 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1707
1708 mutex_lock(&dev_priv->rps.hw_lock);
1709
1710 #define COND \
1711 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1712
1713 if (COND)
1714 goto out;
1715
1716 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1717 ctrl &= ~DP_SSC_MASK(pipe);
1718 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1719 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1720
1721 if (wait_for(COND, 100))
1722 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1723 state,
1724 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1725
1726 #undef COND
1727
1728 out:
1729 mutex_unlock(&dev_priv->rps.hw_lock);
1730 }
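
/*
 * chv_set_pipe_power_well() above follows the usual Punit request/ack
 * pattern: skip the write if the status already matches, otherwise update
 * the control field and poll the status field until it flips or a timeout
 * expires. A hedged userspace model of that loop, with read_status()
 * standing in for the vlv_punit_read() call:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_status;            /* stand-in for PUNIT_REG_DSPFREQ */

static uint32_t read_status(void) { return fake_status; }

static bool wait_for_state(uint32_t mask, uint32_t want, int timeout_ms)
{
	/* Poll roughly once per millisecond, like wait_for(COND, 100). */
	for (int i = 0; i < timeout_ms; i++) {
		if ((read_status() & mask) == want)
			return true;
		usleep(1000);
	}
	return (read_status() & mask) == want;   /* one last check */
}

int main(void)
{
	fake_status = 0x2;                       /* pretend the ack arrived */
	if (!wait_for_state(0x3, 0x2, 100))
		fprintf(stderr, "timeout setting power well state\n");
	return 0;
}
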
1731
1732 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1733 struct i915_power_well *power_well)
1734 {
1735 WARN_ON_ONCE(power_well->id != PIPE_A);
1736
1737 chv_set_pipe_power_well(dev_priv, power_well, true);
1738
1739 vlv_display_power_well_init(dev_priv);
1740 }
1741
1742 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1743 struct i915_power_well *power_well)
1744 {
1745 WARN_ON_ONCE(power_well->id != PIPE_A);
1746
1747 vlv_display_power_well_deinit(dev_priv);
1748
1749 chv_set_pipe_power_well(dev_priv, power_well, false);
1750 }
1751
1752 static void
1753 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1754 enum intel_display_power_domain domain)
1755 {
1756 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1757 struct i915_power_well *power_well;
1758
1759 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1760 intel_power_well_get(dev_priv, power_well);
1761
1762 power_domains->domain_use_count[domain]++;
1763 }
1764
1765 /**
1766 * intel_display_power_get - grab a power domain reference
1767 * @dev_priv: i915 device instance
1768 * @domain: power domain to reference
1769 *
1770 * This function grabs a power domain reference for @domain and ensures that the
1771 * power domain and all its parents are powered up. Therefore users should only
1772 * grab a reference to the innermost power domain they need.
1773 *
1774 * Any power domain reference obtained by this function must have a symmetric
1775 * call to intel_display_power_put() to release the reference again.
1776 */
1777 void intel_display_power_get(struct drm_i915_private *dev_priv,
1778 enum intel_display_power_domain domain)
1779 {
1780 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1781
1782 intel_runtime_pm_get(dev_priv);
1783
1784 mutex_lock(&power_domains->lock);
1785
1786 __intel_display_power_get_domain(dev_priv, domain);
1787
1788 mutex_unlock(&power_domains->lock);
1789 }
1790
1791 /**
1792 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1793 * @dev_priv: i915 device instance
1794 * @domain: power domain to reference
1795 *
1796 * This function grabs a power domain reference for @domain if, and only if,
1797 * the domain is already enabled, and it guarantees in that case that the
1798 * domain stays powered up for as long as the reference is held.
1799 *
1800 * Any reference obtained by this function must have a symmetric call to
1801 * intel_display_power_put(). Returns true if the reference was taken.
1802 */
1803 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1804 enum intel_display_power_domain domain)
1805 {
1806 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1807 bool is_enabled;
1808
1809 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1810 return false;
1811
1812 mutex_lock(&power_domains->lock);
1813
1814 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1815 __intel_display_power_get_domain(dev_priv, domain);
1816 is_enabled = true;
1817 } else {
1818 is_enabled = false;
1819 }
1820
1821 mutex_unlock(&power_domains->lock);
1822
1823 if (!is_enabled)
1824 intel_runtime_pm_put(dev_priv);
1825
1826 return is_enabled;
1827 }
1828
1829 /**
1830 * intel_display_power_put - release a power domain reference
1831 * @dev_priv: i915 device instance
1832 * @domain: power domain to reference
1833 *
1834 * This function drops the power domain reference obtained by
1835 * intel_display_power_get() and might power down the corresponding hardware
1836 * block right away if this is the last reference.
1837 */
1838 void intel_display_power_put(struct drm_i915_private *dev_priv,
1839 enum intel_display_power_domain domain)
1840 {
1841 struct i915_power_domains *power_domains;
1842 struct i915_power_well *power_well;
1843
1844 power_domains = &dev_priv->power_domains;
1845
1846 mutex_lock(&power_domains->lock);
1847
1848 WARN(!power_domains->domain_use_count[domain],
1849 "Use count on domain %s is already zero\n",
1850 intel_display_power_domain_str(domain));
1851 power_domains->domain_use_count[domain]--;
1852
1853 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1854 intel_power_well_put(dev_priv, power_well);
1855
1856 mutex_unlock(&power_domains->lock);
1857
1858 intel_runtime_pm_put(dev_priv);
1859 }
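
/*
 * The get/put pair above implements a two-level refcount: each domain has
 * a use count, and every well powering that domain has its own enable
 * count, taken in forward order on get (wells power up on 0->1) and
 * dropped in reverse order on put (wells power down on 1->0). A
 * self-contained model with invented structures:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { DOMAIN_PIPE_A, DOMAIN_AUDIO, NUM_DOMAINS };

struct well {
	const char *name;
	uint64_t domains;	/* bitmask of domains this well powers */
	int count;		/* enable refcount */
};

static struct well wells[] = {
	{ "display", (1ull << DOMAIN_PIPE_A) | (1ull << DOMAIN_AUDIO), 0 },
};
static int domain_use_count[NUM_DOMAINS];

#define N_WELLS ((int)(sizeof(wells) / sizeof(wells[0])))

static void power_get(int domain)
{
	for (int i = 0; i < N_WELLS; i++)
		if (wells[i].domains & (1ull << domain))
			if (wells[i].count++ == 0)
				printf("enable %s\n", wells[i].name);
	domain_use_count[domain]++;
}

static void power_put(int domain)
{
	assert(domain_use_count[domain] > 0);
	domain_use_count[domain]--;
	for (int i = N_WELLS - 1; i >= 0; i--)
		if (wells[i].domains & (1ull << domain))
			if (--wells[i].count == 0)
				printf("disable %s\n", wells[i].name);
}

int main(void)
{
	power_get(DOMAIN_PIPE_A);
	power_get(DOMAIN_AUDIO);	/* well already on, only counts bump */
	power_put(DOMAIN_AUDIO);
	power_put(DOMAIN_PIPE_A);	/* last reference, well powers down */
	return 0;
}
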
1860
1861 #define HSW_DISPLAY_POWER_DOMAINS ( \
1862 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1863 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1864 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1865 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1866 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1867 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1868 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1869 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1870 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1871 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1872 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1873 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1874 BIT_ULL(POWER_DOMAIN_VGA) | \
1875 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1876 BIT_ULL(POWER_DOMAIN_INIT))
1877
1878 #define BDW_DISPLAY_POWER_DOMAINS ( \
1879 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1880 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1881 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1882 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1883 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1884 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1885 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1886 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1887 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1888 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1889 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1890 BIT_ULL(POWER_DOMAIN_VGA) | \
1891 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1892 BIT_ULL(POWER_DOMAIN_INIT))
1893
1894 #define VLV_DISPLAY_POWER_DOMAINS ( \
1895 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1896 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1897 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1898 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1899 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1900 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1901 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1902 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1903 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1904 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1905 BIT_ULL(POWER_DOMAIN_VGA) | \
1906 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1907 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1908 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1909 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1910 BIT_ULL(POWER_DOMAIN_INIT))
1911
1912 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
1913 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1914 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1915 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1916 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1917 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1918 BIT_ULL(POWER_DOMAIN_INIT))
1919
1920 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
1921 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1922 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1923 BIT_ULL(POWER_DOMAIN_INIT))
1924
1925 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
1926 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1927 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1928 BIT_ULL(POWER_DOMAIN_INIT))
1929
1930 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
1931 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1932 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1933 BIT_ULL(POWER_DOMAIN_INIT))
1934
1935 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
1936 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1937 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1938 BIT_ULL(POWER_DOMAIN_INIT))
1939
1940 #define CHV_DISPLAY_POWER_DOMAINS ( \
1941 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1942 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1943 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1944 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1945 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1946 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1947 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1948 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1949 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1950 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1951 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1952 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1953 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1954 BIT_ULL(POWER_DOMAIN_VGA) | \
1955 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1956 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1957 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1958 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1959 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1960 BIT_ULL(POWER_DOMAIN_INIT))
1961
1962 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
1963 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1964 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1965 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1966 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1967 BIT_ULL(POWER_DOMAIN_INIT))
1968
1969 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
1970 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1971 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1972 BIT_ULL(POWER_DOMAIN_INIT))
1973
1974 #define I830_PIPES_POWER_DOMAINS ( \
1975 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1976 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1977 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1978 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1979 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1980 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1981 BIT_ULL(POWER_DOMAIN_INIT))
1982
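
/*
 * Each *_POWER_DOMAINS macro above is just a 64-bit mask, which is why
 * intel_power_domains_init() later checks POWER_DOMAIN_NUM <= 64 with a
 * BUILD_BUG_ON. A compact illustration of the mask-and-iterate idiom;
 * the PD_* names are invented for the demo:
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ull << (n))

enum { PD_PIPE_A, PD_PIPE_B, PD_VGA, PD_INIT, PD_NUM };
_Static_assert(PD_NUM <= 64, "domain mask must fit in a u64");

#define DEMO_DOMAINS (BIT_ULL(PD_PIPE_A) | BIT_ULL(PD_VGA) | BIT_ULL(PD_INIT))

int main(void)
{
	/* Walk the set bits the way for_each_power_domain() does. */
	for (int d = 0; d < PD_NUM; d++)
		if (DEMO_DOMAINS & BIT_ULL(d))
			printf("domain %d is in the mask\n", d);
	return 0;
}
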
1983 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1984 .sync_hw = i9xx_power_well_sync_hw_noop,
1985 .enable = i9xx_always_on_power_well_noop,
1986 .disable = i9xx_always_on_power_well_noop,
1987 .is_enabled = i9xx_always_on_power_well_enabled,
1988 };
1989
1990 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1991 .sync_hw = i9xx_power_well_sync_hw_noop,
1992 .enable = chv_pipe_power_well_enable,
1993 .disable = chv_pipe_power_well_disable,
1994 .is_enabled = chv_pipe_power_well_enabled,
1995 };
1996
1997 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1998 .sync_hw = i9xx_power_well_sync_hw_noop,
1999 .enable = chv_dpio_cmn_power_well_enable,
2000 .disable = chv_dpio_cmn_power_well_disable,
2001 .is_enabled = vlv_power_well_enabled,
2002 };
2003
2004 static struct i915_power_well i9xx_always_on_power_well[] = {
2005 {
2006 .name = "always-on",
2007 .always_on = 1,
2008 .domains = POWER_DOMAIN_MASK,
2009 .ops = &i9xx_always_on_power_well_ops,
2010 },
2011 };
2012
2013 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2014 .sync_hw = i830_pipes_power_well_sync_hw,
2015 .enable = i830_pipes_power_well_enable,
2016 .disable = i830_pipes_power_well_disable,
2017 .is_enabled = i830_pipes_power_well_enabled,
2018 };
2019
2020 static struct i915_power_well i830_power_wells[] = {
2021 {
2022 .name = "always-on",
2023 .always_on = 1,
2024 .domains = POWER_DOMAIN_MASK,
2025 .ops = &i9xx_always_on_power_well_ops,
2026 },
2027 {
2028 .name = "pipes",
2029 .domains = I830_PIPES_POWER_DOMAINS,
2030 .ops = &i830_pipes_power_well_ops,
2031 },
2032 };
2033
2034 static const struct i915_power_well_ops hsw_power_well_ops = {
2035 .sync_hw = hsw_power_well_sync_hw,
2036 .enable = hsw_power_well_enable,
2037 .disable = hsw_power_well_disable,
2038 .is_enabled = hsw_power_well_enabled,
2039 };
2040
2041 static const struct i915_power_well_ops skl_power_well_ops = {
2042 .sync_hw = skl_power_well_sync_hw,
2043 .enable = skl_power_well_enable,
2044 .disable = skl_power_well_disable,
2045 .is_enabled = skl_power_well_enabled,
2046 };
2047
2048 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2049 .sync_hw = i9xx_power_well_sync_hw_noop,
2050 .enable = gen9_dc_off_power_well_enable,
2051 .disable = gen9_dc_off_power_well_disable,
2052 .is_enabled = gen9_dc_off_power_well_enabled,
2053 };
2054
2055 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2056 .sync_hw = i9xx_power_well_sync_hw_noop,
2057 .enable = bxt_dpio_cmn_power_well_enable,
2058 .disable = bxt_dpio_cmn_power_well_disable,
2059 .is_enabled = bxt_dpio_cmn_power_well_enabled,
2060 };
2061
2062 static struct i915_power_well hsw_power_wells[] = {
2063 {
2064 .name = "always-on",
2065 .always_on = 1,
2066 .domains = POWER_DOMAIN_MASK,
2067 .ops = &i9xx_always_on_power_well_ops,
2068 },
2069 {
2070 .name = "display",
2071 .domains = HSW_DISPLAY_POWER_DOMAINS,
2072 .ops = &hsw_power_well_ops,
2073 },
2074 };
2075
2076 static struct i915_power_well bdw_power_wells[] = {
2077 {
2078 .name = "always-on",
2079 .always_on = 1,
2080 .domains = POWER_DOMAIN_MASK,
2081 .ops = &i9xx_always_on_power_well_ops,
2082 },
2083 {
2084 .name = "display",
2085 .domains = BDW_DISPLAY_POWER_DOMAINS,
2086 .ops = &hsw_power_well_ops,
2087 },
2088 };
2089
2090 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2091 .sync_hw = i9xx_power_well_sync_hw_noop,
2092 .enable = vlv_display_power_well_enable,
2093 .disable = vlv_display_power_well_disable,
2094 .is_enabled = vlv_power_well_enabled,
2095 };
2096
2097 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2098 .sync_hw = i9xx_power_well_sync_hw_noop,
2099 .enable = vlv_dpio_cmn_power_well_enable,
2100 .disable = vlv_dpio_cmn_power_well_disable,
2101 .is_enabled = vlv_power_well_enabled,
2102 };
2103
2104 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2105 .sync_hw = i9xx_power_well_sync_hw_noop,
2106 .enable = vlv_power_well_enable,
2107 .disable = vlv_power_well_disable,
2108 .is_enabled = vlv_power_well_enabled,
2109 };
2110
2111 static struct i915_power_well vlv_power_wells[] = {
2112 {
2113 .name = "always-on",
2114 .always_on = 1,
2115 .domains = POWER_DOMAIN_MASK,
2116 .ops = &i9xx_always_on_power_well_ops,
2117 .id = PUNIT_POWER_WELL_ALWAYS_ON,
2118 },
2119 {
2120 .name = "display",
2121 .domains = VLV_DISPLAY_POWER_DOMAINS,
2122 .id = PUNIT_POWER_WELL_DISP2D,
2123 .ops = &vlv_display_power_well_ops,
2124 },
2125 {
2126 .name = "dpio-tx-b-01",
2127 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2128 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2129 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2130 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2131 .ops = &vlv_dpio_power_well_ops,
2132 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
2133 },
2134 {
2135 .name = "dpio-tx-b-23",
2136 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2137 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2138 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2139 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2140 .ops = &vlv_dpio_power_well_ops,
2141 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
2142 },
2143 {
2144 .name = "dpio-tx-c-01",
2145 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2146 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2147 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2148 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2149 .ops = &vlv_dpio_power_well_ops,
2150 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2151 },
2152 {
2153 .name = "dpio-tx-c-23",
2154 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2155 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2156 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2157 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2158 .ops = &vlv_dpio_power_well_ops,
2159 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2160 },
2161 {
2162 .name = "dpio-common",
2163 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2164 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2165 .ops = &vlv_dpio_cmn_power_well_ops,
2166 },
2167 };
2168
2169 static struct i915_power_well chv_power_wells[] = {
2170 {
2171 .name = "always-on",
2172 .always_on = 1,
2173 .domains = POWER_DOMAIN_MASK,
2174 .ops = &i9xx_always_on_power_well_ops,
2175 },
2176 {
2177 .name = "display",
2178 /*
2179 * Pipe A power well is the new disp2d well. Pipe B and C
2180 * power wells don't actually exist. Pipe A power well is
2181 * required for any pipe to work.
2182 */
2183 .domains = CHV_DISPLAY_POWER_DOMAINS,
2184 .id = PIPE_A,
2185 .ops = &chv_pipe_power_well_ops,
2186 },
2187 {
2188 .name = "dpio-common-bc",
2189 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2190 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2191 .ops = &chv_dpio_cmn_power_well_ops,
2192 },
2193 {
2194 .name = "dpio-common-d",
2195 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2196 .id = PUNIT_POWER_WELL_DPIO_CMN_D,
2197 .ops = &chv_dpio_cmn_power_well_ops,
2198 },
2199 };
2200
2201 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2202 int power_well_id)
2203 {
2204 struct i915_power_well *power_well;
2205 bool ret;
2206
2207 power_well = lookup_power_well(dev_priv, power_well_id);
2208 ret = power_well->ops->is_enabled(dev_priv, power_well);
2209
2210 return ret;
2211 }
2212
2213 static struct i915_power_well skl_power_wells[] = {
2214 {
2215 .name = "always-on",
2216 .always_on = 1,
2217 .domains = POWER_DOMAIN_MASK,
2218 .ops = &i9xx_always_on_power_well_ops,
2219 .id = SKL_DISP_PW_ALWAYS_ON,
2220 },
2221 {
2222 .name = "power well 1",
2223 /* Handled by the DMC firmware */
2224 .domains = 0,
2225 .ops = &skl_power_well_ops,
2226 .id = SKL_DISP_PW_1,
2227 },
2228 {
2229 .name = "MISC IO power well",
2230 /* Handled by the DMC firmware */
2231 .domains = 0,
2232 .ops = &skl_power_well_ops,
2233 .id = SKL_DISP_PW_MISC_IO,
2234 },
2235 {
2236 .name = "DC off",
2237 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2238 .ops = &gen9_dc_off_power_well_ops,
2239 .id = SKL_DISP_PW_DC_OFF,
2240 },
2241 {
2242 .name = "power well 2",
2243 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2244 .ops = &skl_power_well_ops,
2245 .id = SKL_DISP_PW_2,
2246 },
2247 {
2248 .name = "DDI A/E IO power well",
2249 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2250 .ops = &skl_power_well_ops,
2251 .id = SKL_DISP_PW_DDI_A_E,
2252 },
2253 {
2254 .name = "DDI B IO power well",
2255 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2256 .ops = &skl_power_well_ops,
2257 .id = SKL_DISP_PW_DDI_B,
2258 },
2259 {
2260 .name = "DDI C IO power well",
2261 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2262 .ops = &skl_power_well_ops,
2263 .id = SKL_DISP_PW_DDI_C,
2264 },
2265 {
2266 .name = "DDI D IO power well",
2267 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2268 .ops = &skl_power_well_ops,
2269 .id = SKL_DISP_PW_DDI_D,
2270 },
2271 };
2272
2273 static struct i915_power_well bxt_power_wells[] = {
2274 {
2275 .name = "always-on",
2276 .always_on = 1,
2277 .domains = POWER_DOMAIN_MASK,
2278 .ops = &i9xx_always_on_power_well_ops,
2279 },
2280 {
2281 .name = "power well 1",
2282 .domains = 0,
2283 .ops = &skl_power_well_ops,
2284 .id = SKL_DISP_PW_1,
2285 },
2286 {
2287 .name = "DC off",
2288 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2289 .ops = &gen9_dc_off_power_well_ops,
2290 .id = SKL_DISP_PW_DC_OFF,
2291 },
2292 {
2293 .name = "power well 2",
2294 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2295 .ops = &skl_power_well_ops,
2296 .id = SKL_DISP_PW_2,
2297 },
2298 {
2299 .name = "dpio-common-a",
2300 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2301 .ops = &bxt_dpio_cmn_power_well_ops,
2302 .id = BXT_DPIO_CMN_A,
2303 .data = DPIO_PHY1,
2304 },
2305 {
2306 .name = "dpio-common-bc",
2307 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2308 .ops = &bxt_dpio_cmn_power_well_ops,
2309 .id = BXT_DPIO_CMN_BC,
2310 .data = DPIO_PHY0,
2311 },
2312 };
2313
2314 static struct i915_power_well glk_power_wells[] = {
2315 {
2316 .name = "always-on",
2317 .always_on = 1,
2318 .domains = POWER_DOMAIN_MASK,
2319 .ops = &i9xx_always_on_power_well_ops,
2320 },
2321 {
2322 .name = "power well 1",
2323 /* Handled by the DMC firmware */
2324 .domains = 0,
2325 .ops = &skl_power_well_ops,
2326 .id = SKL_DISP_PW_1,
2327 },
2328 {
2329 .name = "DC off",
2330 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2331 .ops = &gen9_dc_off_power_well_ops,
2332 .id = SKL_DISP_PW_DC_OFF,
2333 },
2334 {
2335 .name = "power well 2",
2336 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2337 .ops = &skl_power_well_ops,
2338 .id = SKL_DISP_PW_2,
2339 },
2340 {
2341 .name = "dpio-common-a",
2342 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2343 .ops = &bxt_dpio_cmn_power_well_ops,
2344 .id = BXT_DPIO_CMN_A,
2345 .data = DPIO_PHY1,
2346 },
2347 {
2348 .name = "dpio-common-b",
2349 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2350 .ops = &bxt_dpio_cmn_power_well_ops,
2351 .id = BXT_DPIO_CMN_BC,
2352 .data = DPIO_PHY0,
2353 },
2354 {
2355 .name = "dpio-common-c",
2356 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2357 .ops = &bxt_dpio_cmn_power_well_ops,
2358 .id = GLK_DPIO_CMN_C,
2359 .data = DPIO_PHY2,
2360 },
2361 {
2362 .name = "AUX A",
2363 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2364 .ops = &skl_power_well_ops,
2365 .id = GLK_DISP_PW_AUX_A,
2366 },
2367 {
2368 .name = "AUX B",
2369 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2370 .ops = &skl_power_well_ops,
2371 .id = GLK_DISP_PW_AUX_B,
2372 },
2373 {
2374 .name = "AUX C",
2375 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2376 .ops = &skl_power_well_ops,
2377 .id = GLK_DISP_PW_AUX_C,
2378 },
2379 {
2380 .name = "DDI A IO power well",
2381 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2382 .ops = &skl_power_well_ops,
2383 .id = GLK_DISP_PW_DDI_A,
2384 },
2385 {
2386 .name = "DDI B IO power well",
2387 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2388 .ops = &skl_power_well_ops,
2389 .id = SKL_DISP_PW_DDI_B,
2390 },
2391 {
2392 .name = "DDI C IO power well",
2393 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2394 .ops = &skl_power_well_ops,
2395 .id = SKL_DISP_PW_DDI_C,
2396 },
2397 };
2398
2399 static struct i915_power_well cnl_power_wells[] = {
2400 {
2401 .name = "always-on",
2402 .always_on = 1,
2403 .domains = POWER_DOMAIN_MASK,
2404 .ops = &i9xx_always_on_power_well_ops,
2405 },
2406 {
2407 .name = "power well 1",
2408 /* Handled by the DMC firmware */
2409 .domains = 0,
2410 .ops = &skl_power_well_ops,
2411 .id = SKL_DISP_PW_1,
2412 },
2413 {
2414 .name = "AUX A",
2415 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2416 .ops = &skl_power_well_ops,
2417 .id = CNL_DISP_PW_AUX_A,
2418 },
2419 {
2420 .name = "AUX B",
2421 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2422 .ops = &skl_power_well_ops,
2423 .id = CNL_DISP_PW_AUX_B,
2424 },
2425 {
2426 .name = "AUX C",
2427 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2428 .ops = &skl_power_well_ops,
2429 .id = CNL_DISP_PW_AUX_C,
2430 },
2431 {
2432 .name = "AUX D",
2433 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2434 .ops = &skl_power_well_ops,
2435 .id = CNL_DISP_PW_AUX_D,
2436 },
2437 {
2438 .name = "DC off",
2439 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2440 .ops = &gen9_dc_off_power_well_ops,
2441 .id = SKL_DISP_PW_DC_OFF,
2442 },
2443 {
2444 .name = "power well 2",
2445 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2446 .ops = &skl_power_well_ops,
2447 .id = SKL_DISP_PW_2,
2448 },
2449 {
2450 .name = "DDI A IO power well",
2451 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
2452 .ops = &skl_power_well_ops,
2453 .id = CNL_DISP_PW_DDI_A,
2454 },
2455 {
2456 .name = "DDI B IO power well",
2457 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
2458 .ops = &skl_power_well_ops,
2459 .id = SKL_DISP_PW_DDI_B,
2460 },
2461 {
2462 .name = "DDI C IO power well",
2463 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
2464 .ops = &skl_power_well_ops,
2465 .id = SKL_DISP_PW_DDI_C,
2466 },
2467 {
2468 .name = "DDI D IO power well",
2469 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
2470 .ops = &skl_power_well_ops,
2471 .id = SKL_DISP_PW_DDI_D,
2472 },
2473 };
2474
2475 static int
2476 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2477 int disable_power_well)
2478 {
2479 if (disable_power_well >= 0)
2480 return !!disable_power_well;
2481
2482 return 1;
2483 }
2484
2485 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2486 int enable_dc)
2487 {
2488 uint32_t mask;
2489 int requested_dc;
2490 int max_dc;
2491
2492 if (IS_GEN9_BC(dev_priv)) {
2493 max_dc = 2;
2494 mask = 0;
2495 } else if (IS_GEN9_LP(dev_priv)) {
2496 max_dc = 1;
2497 /*
2498 * DC9 has a separate HW flow from the rest of the DC states,
2499 * not depending on the DMC firmware. It's needed by system
2500 * suspend/resume, so allow it unconditionally.
2501 */
2502 mask = DC_STATE_EN_DC9;
2503 } else {
2504 max_dc = 0;
2505 mask = 0;
2506 }
2507
2508 if (!i915.disable_power_well)
2509 max_dc = 0;
2510
2511 if (enable_dc >= 0 && enable_dc <= max_dc) {
2512 requested_dc = enable_dc;
2513 } else if (enable_dc == -1) {
2514 requested_dc = max_dc;
2515 } else if (enable_dc > max_dc && enable_dc <= 2) {
2516 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2517 enable_dc, max_dc);
2518 requested_dc = max_dc;
2519 } else {
2520 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2521 requested_dc = max_dc;
2522 }
2523
2524 if (requested_dc > 1)
2525 mask |= DC_STATE_EN_UPTO_DC6;
2526 if (requested_dc > 0)
2527 mask |= DC_STATE_EN_UPTO_DC5;
2528
2529 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2530
2531 return mask;
2532 }
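
/*
 * A standalone model of the clamping in get_allowed_dc_mask() above:
 * enable_dc == -1 means "whatever the platform allows", out-of-range
 * requests are clamped to max_dc, and the mask accumulates DC5 before
 * DC6. EN_DC5/EN_DC6 are illustrative stand-ins for the DC_STATE_EN_*
 * bits.
 */
#include <stdint.h>
#include <stdio.h>

#define EN_DC5 0x1u
#define EN_DC6 0x2u

static uint32_t allowed_dc_mask(int enable_dc, int max_dc)
{
	uint32_t mask = 0;
	int requested;

	if (enable_dc >= 0 && enable_dc <= max_dc)
		requested = enable_dc;
	else
		requested = max_dc;	/* -1 and out-of-range both clamp */

	if (requested > 1)
		mask |= EN_DC6;
	if (requested > 0)
		mask |= EN_DC5;
	return mask;
}

int main(void)
{
	printf("0x%x\n", allowed_dc_mask(-1, 2));	/* -> DC5|DC6 */
	printf("0x%x\n", allowed_dc_mask(2, 1));	/* clamped -> DC5 */
	return 0;
}
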
2533
2534 #define set_power_wells(power_domains, __power_wells) ({ \
2535 (power_domains)->power_wells = (__power_wells); \
2536 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
2537 })
2538
2539 /**
2540 * intel_power_domains_init - initializes the power domain structures
2541 * @dev_priv: i915 device instance
2542 *
2543 * Initializes the power domain structures for @dev_priv depending upon the
2544 * supported platform.
2545 */
2546 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2547 {
2548 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2549
2550 i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2551 i915.disable_power_well);
2552 dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2553 i915.enable_dc);
2554
2555 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2556
2557 mutex_init(&power_domains->lock);
2558
2559 /*
2560 * The enabling order will be from lower to higher indexed wells,
2561 * the disabling order is reversed.
2562 */
2563 if (IS_HASWELL(dev_priv)) {
2564 set_power_wells(power_domains, hsw_power_wells);
2565 } else if (IS_BROADWELL(dev_priv)) {
2566 set_power_wells(power_domains, bdw_power_wells);
2567 } else if (IS_GEN9_BC(dev_priv)) {
2568 set_power_wells(power_domains, skl_power_wells);
2569 } else if (IS_CANNONLAKE(dev_priv)) {
2570 set_power_wells(power_domains, cnl_power_wells);
2571 } else if (IS_BROXTON(dev_priv)) {
2572 set_power_wells(power_domains, bxt_power_wells);
2573 } else if (IS_GEMINILAKE(dev_priv)) {
2574 set_power_wells(power_domains, glk_power_wells);
2575 } else if (IS_CHERRYVIEW(dev_priv)) {
2576 set_power_wells(power_domains, chv_power_wells);
2577 } else if (IS_VALLEYVIEW(dev_priv)) {
2578 set_power_wells(power_domains, vlv_power_wells);
2579 } else if (IS_I830(dev_priv)) {
2580 set_power_wells(power_domains, i830_power_wells);
2581 } else {
2582 set_power_wells(power_domains, i9xx_always_on_power_well);
2583 }
2584
2585 return 0;
2586 }
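
/*
 * set_power_wells() above records a platform's table plus its length in
 * one step; the ARRAY_SIZE trick only works because the macro sees the
 * real array, not a pointer it has decayed to. A minimal sketch with
 * invented types:
 */
#include <stdio.h>

#define ARRAY_SIZE(a) ((int)(sizeof(a) / sizeof((a)[0])))

struct well { const char *name; };

struct power_domains {
	struct well *power_wells;
	int power_well_count;
};

#define set_power_wells(pd, tbl) \
	((pd)->power_wells = (tbl), (pd)->power_well_count = ARRAY_SIZE(tbl))

static struct well hsw_wells[] = { { "always-on" }, { "display" } };

int main(void)
{
	struct power_domains pd;

	set_power_wells(&pd, hsw_wells);
	printf("%d wells, first is \"%s\"\n",
	       pd.power_well_count, pd.power_wells[0].name);
	return 0;
}
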
2587
2588 /**
2589 * intel_power_domains_fini - finalizes the power domain structures
2590 * @dev_priv: i915 device instance
2591 *
2592 * Finalizes the power domain structures for @dev_priv depending upon the
2593 * supported platform. This function also disables runtime pm and ensures that
2594 * the device stays powered up so that the driver can be reloaded.
2595 */
2596 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2597 {
2598 struct device *kdev = &dev_priv->drm.pdev->dev;
2599
2600 /*
2601 * The i915.ko module is still not prepared to be loaded when
2602 * the power well is not enabled, so just enable it in case
2603 * we're going to unload/reload.
2604 * The following also reacquires the RPM reference the core passed
2605 * to the driver during loading, which is dropped in
2606 * intel_runtime_pm_enable(). We have to hand back the control of the
2607 * device to the core with this reference held.
2608 */
2609 intel_display_set_init_power(dev_priv, true);
2610
2611 /* Remove the refcount we took to keep power well support disabled. */
2612 if (!i915.disable_power_well)
2613 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2614
2615 /*
2616 * Remove the refcount we took in intel_runtime_pm_enable() in case
2617 * the platform doesn't support runtime PM.
2618 */
2619 if (!HAS_RUNTIME_PM(dev_priv))
2620 pm_runtime_put(kdev);
2621 }
2622
2623 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2624 {
2625 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2626 struct i915_power_well *power_well;
2627
2628 mutex_lock(&power_domains->lock);
2629 for_each_power_well(dev_priv, power_well) {
2630 power_well->ops->sync_hw(dev_priv, power_well);
2631 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2632 power_well);
2633 }
2634 mutex_unlock(&power_domains->lock);
2635 }
2636
2637 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2638 {
2639 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2640 POSTING_READ(DBUF_CTL);
2641
2642 udelay(10);
2643
2644 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2645 DRM_ERROR("DBuf power enable timeout\n");
2646 }
2647
2648 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2649 {
2650 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2651 POSTING_READ(DBUF_CTL);
2652
2653 udelay(10);
2654
2655 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2656 DRM_ERROR("DBuf power disable timeout!\n");
2657 }
2658
2659 static void skl_display_core_init(struct drm_i915_private *dev_priv,
2660 bool resume)
2661 {
2662 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2663 struct i915_power_well *well;
2664 uint32_t val;
2665
2666 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2667
2668 /* enable PCH reset handshake */
2669 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2670 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2671
2672 /* enable PG1 and Misc I/O */
2673 mutex_lock(&power_domains->lock);
2674
2675 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2676 intel_power_well_enable(dev_priv, well);
2677
2678 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2679 intel_power_well_enable(dev_priv, well);
2680
2681 mutex_unlock(&power_domains->lock);
2682
2683 skl_init_cdclk(dev_priv);
2684
2685 gen9_dbuf_enable(dev_priv);
2686
2687 if (resume && dev_priv->csr.dmc_payload)
2688 intel_csr_load_program(dev_priv);
2689 }
2690
2691 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2692 {
2693 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2694 struct i915_power_well *well;
2695
2696 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2697
2698 gen9_dbuf_disable(dev_priv);
2699
2700 skl_uninit_cdclk(dev_priv);
2701
2702 /* The spec doesn't call for removing the reset handshake flag */
2703 /* disable PG1 and Misc I/O */
2704
2705 mutex_lock(&power_domains->lock);
2706
2707 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2708 intel_power_well_disable(dev_priv, well);
2709
2710 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2711 intel_power_well_disable(dev_priv, well);
2712
2713 mutex_unlock(&power_domains->lock);
2714 }
2715
2716 void bxt_display_core_init(struct drm_i915_private *dev_priv,
2717 bool resume)
2718 {
2719 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2720 struct i915_power_well *well;
2721 uint32_t val;
2722
2723 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2724
2725 /*
2726 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2727 * or else the reset will hang because there is no PCH to respond.
2728 * Move the handshake programming to the initialization sequence;
2729 * previously it was left up to the BIOS.
2730 */
2731 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2732 val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2733 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2734
2735 /* Enable PG1 */
2736 mutex_lock(&power_domains->lock);
2737
2738 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2739 intel_power_well_enable(dev_priv, well);
2740
2741 mutex_unlock(&power_domains->lock);
2742
2743 bxt_init_cdclk(dev_priv);
2744
2745 gen9_dbuf_enable(dev_priv);
2746
2747 if (resume && dev_priv->csr.dmc_payload)
2748 intel_csr_load_program(dev_priv);
2749 }
2750
2751 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2752 {
2753 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2754 struct i915_power_well *well;
2755
2756 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2757
2758 gen9_dbuf_disable(dev_priv);
2759
2760 bxt_uninit_cdclk(dev_priv);
2761
2762 /* The spec doesn't call for removing the reset handshake flag */
2763
2764 /* Disable PG1 */
2765 mutex_lock(&power_domains->lock);
2766
2767 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2768 intel_power_well_disable(dev_priv, well);
2769
2770 mutex_unlock(&power_domains->lock);
2771 }
2772
2773 #define CNL_PROCMON_IDX(val) \
2774 (((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
2775 #define NUM_CNL_PROCMON \
2776 (CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
2777
2778 static const struct cnl_procmon {
2779 u32 dw1, dw9, dw10;
2780 } cnl_procmon_values[NUM_CNL_PROCMON] = {
2781 [CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
2782 { .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
2783 [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
2784 { .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
2785 [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
2786 { .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
2787 [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
2788 { .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
2789 [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
2790 { .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
2791 };
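
/*
 * The table above is indexed by fusing the process and voltage fields of
 * CNL_PORT_COMP_DW3 into one small integer, which is what CNL_PROCMON_IDX()
 * does. A generic model of that field-packed lookup; the field layout
 * below is invented for the demo:
 */
#include <stdint.h>
#include <stdio.h>

#define PROCESS_MASK	(0x7u << 26)
#define VOLTAGE_MASK	(0x3u << 24)
#define VOLTAGE_SHIFT	24

#define IDX(val) (((val) & (PROCESS_MASK | VOLTAGE_MASK)) >> VOLTAGE_SHIFT)

/* One slot per possible (process, voltage) combination. */
static const char *procmon_table[IDX(PROCESS_MASK | VOLTAGE_MASK) + 1];

int main(void)
{
	uint32_t dw3 = (1u << 26) | (2u << 24);	/* sample fuse readout */

	procmon_table[IDX(dw3)] = "process dot-1 @ 1.05V";
	printf("idx=%u -> %s\n", (unsigned)IDX(dw3), procmon_table[IDX(dw3)]);
	return 0;
}
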
2792
2793 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
2794 {
2795 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2796 const struct cnl_procmon *procmon;
2797 struct i915_power_well *well;
2798 u32 val;
2799
2800 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2801
2802 /* 1. Enable PCH Reset Handshake */
2803 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2804 val |= RESET_PCH_HANDSHAKE_ENABLE;
2805 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2806
2807 /* 2. Enable Comp */
2808 val = I915_READ(CHICKEN_MISC_2);
2809 val &= ~COMP_PWR_DOWN;
2810 I915_WRITE(CHICKEN_MISC_2, val);
2811
2812 val = I915_READ(CNL_PORT_COMP_DW3);
2813 procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
2814
2815 WARN_ON(procmon->dw10 == 0);
2816
2817 val = I915_READ(CNL_PORT_COMP_DW1);
2818 val &= ~((0xff << 16) | 0xff);
2819 val |= procmon->dw1;
2820 I915_WRITE(CNL_PORT_COMP_DW1, val);
2821
2822 I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
2823 I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
2824
2825 val = I915_READ(CNL_PORT_COMP_DW0);
2826 val |= COMP_INIT;
2827 I915_WRITE(CNL_PORT_COMP_DW0, val);
2828
2829 /* 3. Program CL_POWER_DOWN_ENABLE in CNL_PORT_CL1CM_DW5 */
2830 val = I915_READ(CNL_PORT_CL1CM_DW5);
2831 val |= CL_POWER_DOWN_ENABLE;
2832 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
2833
2834 /* 4. Enable Power Well 1 (PG1) and Aux IO Power */
2835 mutex_lock(&power_domains->lock);
2836 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2837 intel_power_well_enable(dev_priv, well);
2838 mutex_unlock(&power_domains->lock);
2839
2840 /* 5. Enable CD clock */
2841 cnl_init_cdclk(dev_priv);
2842
2843 /* 6. Enable DBUF */
2844 gen9_dbuf_enable(dev_priv);
2845 }
2846
2847 #undef CNL_PROCMON_IDX
2848 #undef NUM_CNL_PROCMON
2849
2850 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
2851 {
2852 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2853 struct i915_power_well *well;
2854 u32 val;
2855
2856 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2857
2858 /* 1. Disable all display engine functions -> already done */
2859
2860 /* 2. Disable DBUF */
2861 gen9_dbuf_disable(dev_priv);
2862
2863 /* 3. Disable CD clock */
2864 cnl_uninit_cdclk(dev_priv);
2865
2866 /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
2867 mutex_lock(&power_domains->lock);
2868 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2869 intel_power_well_disable(dev_priv, well);
2870 mutex_unlock(&power_domains->lock);
2871
2872 /* 5. Disable Comp */
2873 val = I915_READ(CHICKEN_MISC_2);
2874 val |= COMP_PWR_DOWN;
2875 I915_WRITE(CHICKEN_MISC_2, val);
2876 }
2877
2878 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2879 {
2880 struct i915_power_well *cmn_bc =
2881 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2882 struct i915_power_well *cmn_d =
2883 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2884
2885 /*
2886 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2887 * workaround never ever read DISPLAY_PHY_CONTROL, and
2888 * instead maintain a shadow copy ourselves. Use the actual
2889 * power well state and lane status to reconstruct the
2890 * expected initial value.
2891 */
2892 dev_priv->chv_phy_control =
2893 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2894 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
2895 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2896 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2897 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2898
2899 /*
2900 * If all lanes are disabled we leave the override disabled
2901 * with all power down bits cleared to match the state we
2902 * would use after disabling the port. Otherwise enable the
2903 * override and set the lane powerdown bits according to the
2904 * current lane status.
2905 */
2906 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2907 uint32_t status = I915_READ(DPLL(PIPE_A));
2908 unsigned int mask;
2909
2910 mask = status & DPLL_PORTB_READY_MASK;
2911 if (mask == 0xf)
2912 mask = 0x0;
2913 else
2914 dev_priv->chv_phy_control |=
2915 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2916
2917 dev_priv->chv_phy_control |=
2918 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2919
2920 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2921 if (mask == 0xf)
2922 mask = 0x0;
2923 else
2924 dev_priv->chv_phy_control |=
2925 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2926
2927 dev_priv->chv_phy_control |=
2928 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2929
2930 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2931
2932 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2933 } else {
2934 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2935 }
2936
2937 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2938 uint32_t status = I915_READ(DPIO_PHY_STATUS);
2939 unsigned int mask;
2940
2941 mask = status & DPLL_PORTD_READY_MASK;
2942
2943 if (mask == 0xf)
2944 mask = 0x0;
2945 else
2946 dev_priv->chv_phy_control |=
2947 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2948
2949 dev_priv->chv_phy_control |=
2950 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2951
2952 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2953
2954 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2955 } else {
2956 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2957 }
2958
2959 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2960
2961 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2962 dev_priv->chv_phy_control);
2963 }
2964
2965 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2966 {
2967 struct i915_power_well *cmn =
2968 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2969 struct i915_power_well *disp2d =
2970 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2971
2972 /* If the display might already be active, skip this */
2973 if (cmn->ops->is_enabled(dev_priv, cmn) &&
2974 disp2d->ops->is_enabled(dev_priv, disp2d) &&
2975 I915_READ(DPIO_CTL) & DPIO_CMNRST)
2976 return;
2977
2978 DRM_DEBUG_KMS("toggling display PHY side reset\n");
2979
2980 /* cmnlane needs DPLL registers */
2981 disp2d->ops->enable(dev_priv, disp2d);
2982
2983 /*
2984 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2985 * Need to assert and de-assert PHY SB reset by gating the
2986 * common lane power, then un-gating it.
2987 * Simply ungating isn't enough to reset the PHY enough to get
2988 * ports and lanes running.
2989 */
2990 cmn->ops->disable(dev_priv, cmn);
2991 }
2992
2993 /**
2994 * intel_power_domains_init_hw - initialize hardware power domain state
2995 * @dev_priv: i915 device instance
2996 * @resume: Called from resume code paths or not
2997 *
2998 * This function initializes the hardware power domain state and enables all
2999 * power wells belonging to the INIT power domain. Power wells in other
3000 * domains (and not in the INIT domain) are referenced or disabled during the
3001 * modeset state HW readout. After that the reference count of each power well
3002 * must match its HW enabled state, see intel_power_domains_verify_state().
3003 */
3004 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3005 {
3006 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3007
3008 power_domains->initializing = true;
3009
3010 if (IS_CANNONLAKE(dev_priv)) {
3011 cnl_display_core_init(dev_priv, resume);
3012 } else if (IS_GEN9_BC(dev_priv)) {
3013 skl_display_core_init(dev_priv, resume);
3014 } else if (IS_GEN9_LP(dev_priv)) {
3015 bxt_display_core_init(dev_priv, resume);
3016 } else if (IS_CHERRYVIEW(dev_priv)) {
3017 mutex_lock(&power_domains->lock);
3018 chv_phy_control_init(dev_priv);
3019 mutex_unlock(&power_domains->lock);
3020 } else if (IS_VALLEYVIEW(dev_priv)) {
3021 mutex_lock(&power_domains->lock);
3022 vlv_cmnlane_wa(dev_priv);
3023 mutex_unlock(&power_domains->lock);
3024 }
3025
3026 /* For now, we need the power well to be always enabled. */
3027 intel_display_set_init_power(dev_priv, true);
3028 /* Keep the power wells on if the user disabled power well support. */
3029 if (!i915.disable_power_well)
3030 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3031 intel_power_domains_sync_hw(dev_priv);
3032 power_domains->initializing = false;
3033 }
3034
3035 /**
3036 * intel_power_domains_suspend - suspend power domain state
3037 * @dev_priv: i915 device instance
3038 *
3039 * This function prepares the hardware power domain state before entering
3040 * system suspend. It must be paired with intel_power_domains_init_hw().
3041 */
3042 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3043 {
3044 /*
3045 * Even if power well support was disabled we still want to disable
3046 * power wells while we are system suspended.
3047 */
3048 if (!i915.disable_power_well)
3049 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3050
3051 if (IS_CANNONLAKE(dev_priv))
3052 cnl_display_core_uninit(dev_priv);
3053 else if (IS_GEN9_BC(dev_priv))
3054 skl_display_core_uninit(dev_priv);
3055 else if (IS_GEN9_LP(dev_priv))
3056 bxt_display_core_uninit(dev_priv);
3057 }
3058
3059 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3060 {
3061 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3062 struct i915_power_well *power_well;
3063
3064 for_each_power_well(dev_priv, power_well) {
3065 enum intel_display_power_domain domain;
3066
3067 DRM_DEBUG_DRIVER("%-25s %d\n",
3068 power_well->name, power_well->count);
3069
3070 for_each_power_domain(domain, power_well->domains)
3071 DRM_DEBUG_DRIVER(" %-23s %d\n",
3072 intel_display_power_domain_str(domain),
3073 power_domains->domain_use_count[domain]);
3074 }
3075 }
3076
3077 /**
3078 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3079 * @dev_priv: i915 device instance
3080 *
3081 * Verify if the reference count of each power well matches its HW enabled
3082 * state and the total refcount of the domains it belongs to. This must be
3083 * called after modeset HW state sanitization, which is responsible for
3084 * acquiring reference counts for any power wells in use and disabling the
3085 * ones left on by BIOS but not required by any active output.
3086 */
3087 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3088 {
3089 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3090 struct i915_power_well *power_well;
3091 bool dump_domain_info;
3092
3093 mutex_lock(&power_domains->lock);
3094
3095 dump_domain_info = false;
3096 for_each_power_well(dev_priv, power_well) {
3097 enum intel_display_power_domain domain;
3098 int domains_count;
3099 bool enabled;
3100
3101 /*
3102 * Power wells not belonging to any domain (like the MISC_IO
3103 * and PW1 power wells) are under FW control, so ignore them,
3104 * since their state can change asynchronously.
3105 */
3106 if (!power_well->domains)
3107 continue;
3108
3109 enabled = power_well->ops->is_enabled(dev_priv, power_well);
3110 if ((power_well->count || power_well->always_on) != enabled)
3111 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3112 power_well->name, power_well->count, enabled);
3113
3114 domains_count = 0;
3115 for_each_power_domain(domain, power_well->domains)
3116 domains_count += power_domains->domain_use_count[domain];
3117
3118 if (power_well->count != domains_count) {
3119 DRM_ERROR("power well %s refcount/domain refcount mismatch "
3120 "(refcount %d/domains refcount %d)\n",
3121 power_well->name, power_well->count,
3122 domains_count);
3123 dump_domain_info = true;
3124 }
3125 }
3126
3127 if (dump_domain_info) {
3128 static bool dumped;
3129
3130 if (!dumped) {
3131 intel_power_domains_dump_info(dev_priv);
3132 dumped = true;
3133 }
3134 }
3135
3136 mutex_unlock(&power_domains->lock);
3137 }
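
/*
 * The consistency rules checked above reduce to two invariants per well:
 * (count || always_on) must equal the hardware enabled bit, and count must
 * equal the sum of the use counts of all domains the well powers. A
 * stripped-down model of that check, with types invented for the demo:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct well {
	const char *name;
	uint64_t domains;
	int count;
	bool always_on;
	bool hw_enabled;
};

static int domain_use_count[64];

static bool verify(const struct well *w)
{
	int domains_count = 0;

	if ((w->count || w->always_on) != w->hw_enabled) {
		fprintf(stderr, "%s: refcount/HW state mismatch\n", w->name);
		return false;
	}
	for (int d = 0; d < 64; d++)
		if (w->domains & (1ull << d))
			domains_count += domain_use_count[d];
	if (w->count != domains_count) {
		fprintf(stderr, "%s: refcount/domain refcount mismatch\n",
			w->name);
		return false;
	}
	return true;
}

int main(void)
{
	struct well w = { "display", 0x3, 2, false, true };

	domain_use_count[0] = 1;
	domain_use_count[1] = 1;
	printf("%s\n", verify(&w) ? "consistent" : "inconsistent");
	return 0;
}
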
3138
3139 /**
3140 * intel_runtime_pm_get - grab a runtime pm reference
3141 * @dev_priv: i915 device instance
3142 *
3143 * This function grabs a device-level runtime pm reference (mostly used for GEM
3144 * code to ensure the GTT or GT is on) and ensures that it is powered up.
3145 *
3146 * Any runtime pm reference obtained by this function must have a symmetric
3147 * call to intel_runtime_pm_put() to release the reference again.
3148 */
3149 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3150 {
3151 struct pci_dev *pdev = dev_priv->drm.pdev;
3152 struct device *kdev = &pdev->dev;
3153 int ret;
3154
3155 ret = pm_runtime_get_sync(kdev);
3156 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3157
3158 atomic_inc(&dev_priv->pm.wakeref_count);
3159 assert_rpm_wakelock_held(dev_priv);
3160 }
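
/*
 * The runtime-pm helpers above pair a core RPM reference with a local
 * wakeref counter that exists purely for the assert_rpm_wakelock_held()
 * style sanity checks. A hedged model with C11 atomics; rpm_get() and
 * rpm_put() are stand-ins for the PM core calls, not real kernel API:
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int wakeref_count;

static void rpm_get(void) { }	/* stand-in for pm_runtime_get_sync() */
static void rpm_put(void) { }	/* stand-in for pm_runtime_put_autosuspend() */

static void runtime_pm_get(void)
{
	rpm_get();
	atomic_fetch_add(&wakeref_count, 1);
}

static void runtime_pm_put(void)
{
	/* Models assert_rpm_wakelock_held(): a put without a get is a bug. */
	assert(atomic_load(&wakeref_count) > 0);
	atomic_fetch_sub(&wakeref_count, 1);
	rpm_put();
}

int main(void)
{
	runtime_pm_get();
	printf("wakerefs held: %d\n", atomic_load(&wakeref_count));
	runtime_pm_put();
	return 0;
}
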
3161
3162 /**
3163 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3164 * @dev_priv: i915 device instance
3165 *
3166 * This function grabs a device-level runtime pm reference if the device is
3167 * already in use and ensures that it is powered up.
3168 *
3169 * Any runtime pm reference obtained by this function must have a symmetric
3170 * call to intel_runtime_pm_put() to release the reference again.
3171 */
3172 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3173 {
3174 struct pci_dev *pdev = dev_priv->drm.pdev;
3175 struct device *kdev = &pdev->dev;
3176
3177 if (IS_ENABLED(CONFIG_PM)) {
3178 int ret = pm_runtime_get_if_in_use(kdev);
3179
3180 /*
3181 * In cases runtime PM is disabled by the RPM core and we get
3182 * an -EINVAL return value we are not supposed to call this
3183 * function, since the power state is undefined. This applies
3184 * atm to the late/early system suspend/resume handlers.
3185 */
3186 WARN_ONCE(ret < 0,
3187 "pm_runtime_get_if_in_use() failed: %d\n", ret);
3188 if (ret <= 0)
3189 return false;
3190 }
3191
3192 atomic_inc(&dev_priv->pm.wakeref_count);
3193 assert_rpm_wakelock_held(dev_priv);
3194
3195 return true;
3196 }
3197
3198 /**
3199 * intel_runtime_pm_get_noresume - grab a runtime pm reference
3200 * @dev_priv: i915 device instance
3201 *
3202 * This function grabs a device-level runtime pm reference (mostly used for GEM
3203 * code to ensure the GTT or GT is on).
3204 *
3205 * It will _not_ power up the device but instead only check that it's powered
3206 * on. Therefore it is only valid to call this function from contexts where
3207 * the device is known to be powered up and where trying to power it up would
3208 * result in hilarity and deadlocks. That pretty much means only the system
3209 * suspend/resume code where this is used to grab runtime pm references for
3210 * delayed setup down in work items.
3211 *
3212 * Any runtime pm reference obtained by this function must have a symmetric
3213 * call to intel_runtime_pm_put() to release the reference again.
3214 */
3215 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3216 {
3217 struct pci_dev *pdev = dev_priv->drm.pdev;
3218 struct device *kdev = &pdev->dev;
3219
3220 assert_rpm_wakelock_held(dev_priv);
3221 pm_runtime_get_noresume(kdev);
3222
3223 atomic_inc(&dev_priv->pm.wakeref_count);
3224 }
3225
3226 /**
3227 * intel_runtime_pm_put - release a runtime pm reference
3228 * @dev_priv: i915 device instance
3229 *
3230 * This function drops the device-level runtime pm reference obtained by
3231 * intel_runtime_pm_get() and might power down the corresponding
3232 * hardware block right away if this is the last reference.
3233 */
3234 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3235 {
3236 struct pci_dev *pdev = dev_priv->drm.pdev;
3237 struct device *kdev = &pdev->dev;
3238
3239 assert_rpm_wakelock_held(dev_priv);
3240 atomic_dec(&dev_priv->pm.wakeref_count);
3241
3242 pm_runtime_mark_last_busy(kdev);
3243 pm_runtime_put_autosuspend(kdev);
3244 }
3245
3246 /**
3247 * intel_runtime_pm_enable - enable runtime pm
3248 * @dev_priv: i915 device instance
3249 *
3250 * This function enables runtime pm at the end of the driver load sequence.
3251 *
3252 * Note that this function does currently not enable runtime pm for the
3253 * subordinate display power domains. That is only done on the first modeset
3254 * using intel_display_set_init_power().
3255 */
3256 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3257 {
3258 struct pci_dev *pdev = dev_priv->drm.pdev;
3259 struct device *kdev = &pdev->dev;
3260
3261 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3262 pm_runtime_mark_last_busy(kdev);
3263
3264 /*
3265 * Take a permanent reference to disable the RPM functionality and drop
3266 * it only when unloading the driver. Use the low level get/put helpers,
3267 * so the driver's own RPM reference tracking asserts also work on
3268 * platforms without RPM support.
3269 */
3270 if (!HAS_RUNTIME_PM(dev_priv)) {
3271 int ret;
3272
3273 pm_runtime_dont_use_autosuspend(kdev);
3274 ret = pm_runtime_get_sync(kdev);
3275 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3276 } else {
3277 pm_runtime_use_autosuspend(kdev);
3278 }
3279
3280 /*
3281 * The core calls the driver load handler with an RPM reference held.
3282 * We drop that here and will reacquire it during unloading in
3283 * intel_power_domains_fini().
3284 */
3285 pm_runtime_put_autosuspend(kdev);
3286 }