/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both the hardware (source) and the
 * panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

#include "intel_drv.h"
#include "i915_drv.h"

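/*
 * Resolve the effective global PSR enable state: a debugfs override in the
 * debug mask takes precedence, otherwise the enable_psr module parameter
 * decides.
 */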
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc_params.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

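/*
 * Map a transcoder to the bit shift used for its PSR interrupt bits in the
 * EDP_PSR_IMR/IIR registers; unknown transcoders fall back to transcoder EDP.
 */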
static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A:
		return EDP_PSR_TRANSCODER_A_SHIFT;
	case TRANSCODER_B:
		return EDP_PSR_TRANSCODER_B_SHIFT;
	case TRANSCODER_C:
		return EDP_PSR_TRANSCODER_C_SHIFT;
	default:
		MISSING_CASE(cpu_transcoder);
		/* fallthrough */
	case TRANSCODER_EDP:
		return EDP_PSR_TRANSCODER_EDP_SHIFT;
	}
}

void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;
	enum transcoder cpu_transcoder;
	u32 transcoders = BIT(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	debug_mask = 0;
	mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		mask |= EDP_PSR_ERROR(shift);
		debug_mask |= EDP_PSR_POST_EXIT(shift) |
			      EDP_PSR_PRE_ENTRY(shift);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}

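/* Decode the PSR_EVENT register value into human readable debug messages. */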
static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();
	u32 mask = 0;

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		if (psr_iir & EDP_PSR_ERROR(shift)) {
			DRM_WARN("[transcoder %s] PSR aux error\n",
				 transcoder_name(cpu_transcoder));

			dev_priv->psr.irq_aux_error = true;

			/*
			 * If this interrupt is not masked it will keep
			 * interrupting so fast that it prevents the scheduled
			 * work from running.
			 * Also, after a PSR error we don't want to arm PSR
			 * again, so we don't care about unmasking the
			 * interrupt or unsetting irq_aux_error.
			 */
			mask |= EDP_PSR_ERROR(shift);
		}

		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}

	if (mask) {
		mask |= I915_READ(EDP_PSR_IMR);
		I915_WRITE(EDP_PSR_IMR, mask);

		schedule_work(&dev_priv->psr.work);
	}
}

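/*
 * The helpers below query, over DPCD, the optional sink capabilities
 * (colorimetry, ALPM, synchronization latency, SU X granularity) that are
 * used to decide how PSR and PSR2 get configured.
 */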
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	WARN_ON(dev_priv->psr.dp);
	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

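/*
 * Pre-program the AUX message (a native write of DP_SET_POWER_D0) and the
 * AUX control value into the EDP_PSR_AUX_* registers, so the hardware can
 * send the required wake-up transaction to the sink on its own when exiting
 * PSR.
 */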
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

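/*
 * Translate the TP1 and TP2/TP3 wakeup times from the VBT (in microseconds)
 * into the corresponding EDP_PSR_CTL time fields, and select TP1+TP3 or
 * TP1+TP2 training patterns depending on HBR2/TPS3 support.
 */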
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}

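/*
 * Check whether the requested CRTC configuration is compatible with PSR2:
 * sink support present, no DSC, resolution within the per-platform PSR2
 * limits, hdisplay a multiple of the SU X granularity and no pipe CRC in use.
 */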
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
					 enum transcoder cpu_transcoder)
{
	static const i915_reg_t regs[] = {
		[TRANSCODER_A] = CHICKEN_TRANS_A,
		[TRANSCODER_B] = CHICKEN_TRANS_B,
		[TRANSCODER_C] = CHICKEN_TRANS_C,
		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
	};

	WARN_ON(INTEL_GEN(dev_priv) < 9);

	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
		    !regs[cpu_transcoder].reg))
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
							cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG, mask);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

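/*
 * Clear the PSR1 or PSR2 enable bit in hardware; when PSR is not active this
 * only sanity checks that neither enable bit is still set.
 */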
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (INTEL_GEN(dev_priv) >= 9)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS;
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS;
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
				    2000))
		DRM_ERROR("Timed out waiting for PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #0884: all
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently
	 * active pipe.
	 */
	I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * the PSR version when executing fastsets. For full modesets,
 * intel_psr_disable() and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

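/*
 * Wait, with psr.lock temporarily dropped, for the PSR status register to
 * report idle; returns true only if the wait succeeded and PSR is still
 * enabled after re-acquiring the lock.
 */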
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

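/*
 * Build and commit an atomic state that marks the first active PSR-capable
 * CRTC as mode changed, triggering a pipe update so a changed debugfs PSR
 * mode takes effect.
 */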
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time that the PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
	 */
	val = I915_READ(EDP_PSR_IIR);
	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
	if (val) {
		DRM_DEBUG_KMS("PSR interruption error set\n");
		dev_priv->psr.sink_not_reliable = true;
		return;
	}

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}

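/*
 * Handle a short HPD pulse from the eDP sink: read the PSR status and error
 * DPCD registers, and disable PSR (marking the sink as not reliable) when the
 * sink reports an internal error or an RFB storage/VSC SDP/link CRC error.
 */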
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}