/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented according
 * to the PSR spec in eDP 1.3. PSR allows the display to go to lower standby
 * states when the system is idle but the display is on, as it eliminates
 * display refresh requests to DDR memory completely as long as the frame
 * buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support also uses
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

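/*
 * A rough sketch of the frontbuffer-tracking integration described above.
 * The exact call sites live in the frontbuffer tracking code and may differ
 * slightly between kernel versions; this is illustrative only:
 *
 *	// CPU rendering dirties a frontbuffer object:
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
 *	...
 *	// once the rendering is flushed out to memory:
 *	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
 *
 * intel_psr_invalidate() forces a PSR exit while the buffer is busy, and
 * intel_psr_flush() lets the re-enable work requeue once no relevant
 * frontbuffer bits remain dirty.
 */
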
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
	/* CNL HW requires corresponding AUX IOs to be powered up for PSR.
	 * However, for non-A AUX ports the corresponding non-EDP transcoders
	 * would have already enabled power well 2 and DC_OFF. This means we can
	 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
	 * specific AUX_IO reference without powering up any extra wells.
	 * Note that PSR is enabled only on Port A even though this function
	 * returns the correct domain for other ports too.
	 */
	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
					      intel_dp->aux_power_domain;
}

static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

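/*
 * Note on intel_psr_irq_control(): in the EDP_PSR interrupt mask register a
 * set bit masks the corresponding event, so writing ~mask unmasks exactly the
 * selected error bits (and, when debug is true, also the PSR pre-entry and
 * post-exit events). A hedged sketch of a debug-toggle caller:
 *
 *	intel_psr_irq_control(dev_priv, true);	// also report entry/exit events
 */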
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
	u32 debug_mask, mask;

	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8) {
		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
			EDP_PSR_ERROR(TRANSCODER_B) |
			EDP_PSR_ERROR(TRANSCODER_C);

		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
	}

	if (debug)
		mask |= debug_mask;

	WRITE_ONCE(dev_priv->psr.debug, debug);
	I915_WRITE(EDP_PSR_IMR, ~mask);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

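/*
 * In the handler below, PSR_EVENT is read and then written back with the same
 * value; the write-back is what clears the logged exit-event bits, so
 * psr_event_print() only ever reports the events that belong to the current
 * exit.
 */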
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		/* FIXME: Exit PSR and link train manually when this happens. */
		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
				      transcoder_name(cpu_transcoder));

		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}
}

static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
{
	uint8_t psr_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
		return false;
	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_ERROR("Unable to get sink synchronization latency\n");
	return val;
}

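/*
 * For reference when reading intel_psr_init_dpcd() below: psr_dpcd[0] is the
 * sink's PSR version read from DP_PSR_SUPPORT. Per the eDP DPCD definitions,
 * 1 means PSR1, 2 means PSR2, and 3 (checked below via
 * DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) means PSR2 with Y-coordinate support.
 */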
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0]) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support =
			intel_dp_get_y_coord_required(intel_dp);
		DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
			      ? "supported" : "not supported");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
			dev_priv->psr.sink_sync_latency =
				intel_dp_get_sink_sync_latency(intel_dp);
		}
	}
}

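/*
 * hsw_psr_setup_vsc() below builds the DP VSC SDP header that describes the
 * PSR state to the sink. In short: HB2 carries the SDP revision and HB3 the
 * number of valid data bytes, so revision 2 / 0x8 bytes is the plain PSR1
 * packet (eDP 1.3, Table 3.10), revision 4 / 0xe the PSR2 packet, and
 * revision 5 / 0x13 the PSR2 packet extended for colorimetry (eDP 1.4,
 * Table 6.11).
 */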
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

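/*
 * The aux_msg[] below is the raw native AUX transaction that the PSR hardware
 * can send on its own to wake the sink on PSR exit: a one-byte native write of
 * DP_SET_POWER_D0 to DPCD address DP_SET_POWER. Byte 0 carries the request
 * type in its high nibble (plus the top address bits), bytes 1-2 the rest of
 * the address, byte 3 the length minus one, and byte 4 the payload.
 */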
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);

	if (dev_priv->psr.psr2_enabled)
		dpcd_val |= DP_PSR_ENABLE_PSR2;
	if (dev_priv->psr.link_standby)
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

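/*
 * In hsw_activate_psr1()/hsw_activate_psr2() below the VBT wakeup times are
 * quantized to the discrete TP1/TP2/TP3 buckets the hardware understands, by
 * rounding up to the next bucket. For example, a VBT tp1 wakeup time of
 * 200 usec falls between the 100 usec and 500 usec buckets and therefore
 * selects EDP_PSR_TP1_TIME_500us.
 */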
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with the 5 or 6 idle
	 * patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with the 5 or 6 idle
	 * patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}

static void hsw_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* On HSW+, once PSR is enabled on the source the hardware activates
	 * it by itself as soon as the configured idle_frame count is reached.
	 * So all we actually do here at activation time is turn it on.
	 */

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}

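/*
 * intel_psr_compute_config() below is expected to run during atomic check,
 * from the eDP encoder's compute_config path, so that crtc_state->has_psr and
 * has_psr2 are settled before the modeset commits and intel_psr_enable() is
 * called.
 */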
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_enabled)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	psr_aux_io_power_get(intel_dp);

	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled) {
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);
		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per spec: avoid continuous PSR exit by masking MEMUP and
		 * HPD. Also mask LPSP to avoid a dependency on other drivers
		 * that might block runtime_pm, besides preventing other HW
		 * tracking issues, now that we can rely on frontbuffer
		 * tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	}
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
	dev_priv->psr.enable_sink(intel_dp);
	dev_priv->psr.enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after pipe is fully trained and
		 * enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 * - On HSW/BDW we get a recoverable frozen screen until
		 *   next exit-activate sequence.
		 */
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_enabled) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_enabled)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}

	psr_aux_io_power_put(intel_dp);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

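/*
 * psr_wait_for_idle() below deliberately drops psr.lock around the register
 * wait: the idle poll can take up to 50 ms, and holding the lock for that long
 * would stall intel_psr_invalidate()/intel_psr_flush() callers. Because of
 * that, the function re-checks dev_priv->psr.enabled once the lock is
 * re-acquired and reports the combined result to its caller.
 */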
static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;
	i915_reg_t reg;
	u32 mask;
	int err;

	intel_dp = dev_priv->psr.enabled;
	if (!intel_dp)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);

	mutex_lock(&dev_priv->psr.lock);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!psr_wait_for_idle(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(dev_priv->psr.enabled);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_enabled) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
			 * but it makes more sense to write to the currently
			 * active pipe.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1) {
		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;

		/* Per platform default: all disabled. */
		i915_modparams.enable_psr = 0;
	}

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby x link_off defaults */
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

	dev_priv->psr.enable_source = hsw_psr_enable_source;
	dev_priv->psr.disable_source = hsw_psr_disable;
	dev_priv->psr.enable_sink = hsw_psr_enable_sink;
	dev_priv->psr.activate = hsw_psr_activate;
	dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
}