/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence the i915 PSR support uses software
 * frontbuffer tracking to make sure it doesn't miss a screen update. For this
 * integration intel_psr_invalidate() and intel_psr_flush() get called by the
 * frontbuffer tracking code. Note that because of locking issues the
 * self-refresh re-enable code is done from a work queue, which must be
 * correctly synchronized/cancelled when shutting down the pipe.
 */

static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

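/*
 * Note the IMR convention used below: a set bit in the IMR masks (disables)
 * an interrupt. The events collected in "mask" are therefore the ones being
 * *unmasked* when ~mask is written: the PSR error is always unmasked, the
 * pre-entry/post-exit events only when the IRQ debug flag is set.
 */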
static void psr_irq_control(struct drm_i915_private *dev_priv)
{
	enum transcoder trans_shift;
	u32 mask, val;
	i915_reg_t imr_reg;

	/*
	 * gen12+ has one instance of the PSR registers per transcoder, all
	 * using the same bit definitions: handle it as TRANSCODER_EDP to
	 * force a 0 shift in the bit definitions.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = I915_READ(imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	I915_WRITE(imr_reg, val);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		dev_priv->psr.last_entry_attempt = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
			      transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		dev_priv->psr.last_exit = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
			      transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = dev_priv->psr.psr2_enabled;

			I915_WRITE(PSR_EVENT(cpu_transcoder), val);
			psr_event_print(val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		DRM_WARN("[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		dev_priv->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * there is no need to unmask the interrupt or to clear
		 * irq_aux_error.
		 */
		val = I915_READ(imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		I915_WRITE(imr_reg, val);

		schedule_work(&dev_priv->psr.work);
	}
}

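/*
 * ALPM (Advanced Link Power Management) is a sink requirement for PSR2 in
 * this driver: intel_psr_init_dpcd() only sets sink_psr2_support when the
 * sink reports the ALPM capability read below.
 */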
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct dp_sdp psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

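/*
 * aux_msg below is a pre-packed native AUX write of DP_SET_POWER_D0 to
 * DP_SET_POWER, stored in the EDP_PSR_AUX_DATA registers so the PSR hardware
 * can send it on its own to wake the sink when exiting PSR, without a
 * software-initiated AUX transfer.
 */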
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

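/*
 * Idle frames = number of unchanged frames the hardware waits for before
 * entering PSR. The result is clamped to 0xf because the IDLE_FRAME field
 * in EDP_PSR_CTL/EDP_PSR2_CTL is only 4 bits wide.
 */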
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/*
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);

	if (WARN_ON(idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL, and BSpec recommends
	 * keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

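/*
 * DC3CO helpers: while PSR2 is active on TGL the display can sit in DC3CO,
 * a lighter display power state intended to be quicker to exit than DC5/DC6.
 * tgl_psr2_enable_dc3co() switches the target DC state to DC3CO and drops
 * the PSR2 idle frame count to 0; tgl_psr2_disable_dc3co() restores DC6 as
 * the target and re-programs the computed idle frames. tgl_dc3co_flush(),
 * further below, re-arms a delayed work at every flip so DC3CO is only left
 * once the display has been idle for the required 6 frames.
 */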
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
				     u32 idle_frames)
{
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc5_idle_thread(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.idle_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&dev_priv->psr.idle_work))
		goto unlock;

	DRM_DEBUG_KMS("DC5/6 idle thread\n");
	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&dev_priv->psr.idle_work);
	/* Before PSR2 exit, disallow DC3CO */
	tgl_psr2_disable_dc3co(dev_priv);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
			      transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
			      crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of the
	 * X granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of the PSR registers per
	 * transcoder, but for now the driver only supports one instance of
	 * PSR, so let's keep it hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);

	psr_irq_control(dev_priv);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	u32 val;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = I915_READ(EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
	}
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
		return;
	}

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

871 | ||
26f9ec9a JRS |
872 | static void intel_psr_exit(struct drm_i915_private *dev_priv) |
873 | { | |
874 | u32 val; | |
875 | ||
b2fc2252 | 876 | if (!dev_priv->psr.active) { |
0f81e645 | 877 | if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { |
4ab4fa10 JRS |
878 | val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); |
879 | WARN_ON(val & EDP_PSR2_ENABLE); | |
880 | } | |
881 | ||
882 | val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); | |
883 | WARN_ON(val & EDP_PSR_ENABLE); | |
884 | ||
26f9ec9a | 885 | return; |
b2fc2252 | 886 | } |
26f9ec9a JRS |
887 | |
888 | if (dev_priv->psr.psr2_enabled) { | |
1c4d821d | 889 | tgl_disallow_dc3co_on_psr2_exit(dev_priv); |
4ab4fa10 | 890 | val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); |
26f9ec9a | 891 | WARN_ON(!(val & EDP_PSR2_ENABLE)); |
4ab4fa10 JRS |
892 | val &= ~EDP_PSR2_ENABLE; |
893 | I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); | |
26f9ec9a | 894 | } else { |
4ab4fa10 | 895 | val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); |
26f9ec9a | 896 | WARN_ON(!(val & EDP_PSR_ENABLE)); |
4ab4fa10 JRS |
897 | val &= ~EDP_PSR_ENABLE; |
898 | I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); | |
26f9ec9a JRS |
899 | } |
900 | dev_priv->psr.active = false; | |
901 | } | |
902 | ||
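/*
 * Caller must hold psr.lock: deactivates PSR on the source, waits for the
 * hardware to go idle, and only then disables PSR on the sink.
 */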
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (dev_priv->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
	cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR version when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

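/*
 * Used by intel_psr_debug_set() to make a new PSR debug mode take effect
 * right away: build a full atomic state, mark the first active PSR-capable
 * CRTC's mode as changed and commit it, retrying on -EDEADLK as usual for
 * driver-internal atomic commits.
 */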
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		if (crtc_state->hw.active && crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->uapi.mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (dev_priv->psr.enabled)
		psr_irq_control(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/*
 * Once we completely rely on PSR2 S/W tracking in the future, intel_psr_flush()
 * will also invalidate and flush PSR for ORIGIN_FLIP events, so
 * tgl_dc3co_flush() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	u32 delay;

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * Every frontbuffer flush from a flip event re-arms the delayed work;
	 * when the delayed work finally runs it means the display has been
	 * idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	/* DC5/DC6 required idle frames = 6 */
	delay = 6 * dev_priv->psr.dc3co_exit_delay;
	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
			 usecs_to_jiffies(delay));

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP) {
		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

b2b89f55 RV |
1350 | /** |
1351 | * intel_psr_init - Init basic PSR work and mutex. | |
93de056b | 1352 | * @dev_priv: i915 device private |
b2b89f55 RV |
1353 | * |
1354 | * This function is called only once at driver load to initialize basic | |
1355 | * PSR stuff. | |
1356 | */ | |
c39055b0 | 1357 | void intel_psr_init(struct drm_i915_private *dev_priv) |
0bc12bcb | 1358 | { |
0f328da6 RV |
1359 | if (!HAS_PSR(dev_priv)) |
1360 | return; | |
1361 | ||
c9ef291a DP |
1362 | if (!dev_priv->psr.sink_support) |
1363 | return; | |
1364 | ||
4ab4fa10 JRS |
1365 | if (IS_HASWELL(dev_priv)) |
1366 | /* | |
1367 | * HSW doesn't have its PSR registers in the same space as the | |
1368 | * transcoder, so set this to a value that, when subtracted from a | |
1369 | * register in transcoder space, yields the right offset for HSW. | |
1370 | */ | |
1371 | dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; | |
1372 | ||
598c6cfe DP |
1373 | if (i915_modparams.enable_psr == -1) |
1374 | if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) | |
1375 | i915_modparams.enable_psr = 0; | |
d94d6e87 | 1376 | |
65f61b42 | 1377 | /* Set link_standby vs. link_off defaults */ |
8652744b | 1378 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
60e5ffe3 RV |
1379 | /* HSW and BDW require workarounds that we don't implement. */ |
1380 | dev_priv->psr.link_standby = false; | |
99d7a741 JRS |
1381 | else if (INTEL_GEN(dev_priv) < 12) |
1382 | /* For newer platforms up to TGL, respect the VBT again */ | |
60e5ffe3 RV |
1383 | dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; |
1384 | ||
5422b37c | 1385 | INIT_WORK(&dev_priv->psr.work, intel_psr_work); |
1c4d821d | 1386 | INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread); |
0bc12bcb RV |
1387 | mutex_init(&dev_priv->psr.lock); |
1388 | } | |
cc3054ff | 1389 | |
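/*
 * [Editor's sketch, not part of the driver] The HSW adjustment set up
 * in intel_psr_init() is meant to be subtracted from PSR register
 * offsets computed in transcoder space; this hypothetical helper only
 * restates that arithmetic.
 */
static u32 example_hsw_psr_reg_offset(struct drm_i915_private *dev_priv,
				      u32 transcoder_space_offset)
{
	/* on non-HSW platforms hsw_psr_mmio_adjust is 0, so this is a no-op */
	return transcoder_space_offset - dev_priv->hsw_psr_mmio_adjust;
}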
95851205 JRS |
1390 | static int psr_get_status_and_error_status(struct intel_dp *intel_dp, |
1391 | u8 *status, u8 *error_status) | |
1392 | { | |
1393 | struct drm_dp_aux *aux = &intel_dp->aux; | |
1394 | int ret; | |
1395 | ||
1396 | ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); | |
1397 | if (ret != 1) | |
1398 | return ret; | |
1399 | ||
1400 | ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); | |
1401 | if (ret != 1) | |
1402 | return ret; | |
1403 | ||
1404 | *status = *status & DP_PSR_SINK_STATE_MASK; | |
1405 | ||
1406 | return 0; | |
1407 | } | |
1408 | ||
700355af JRS |
1409 | static void psr_alpm_check(struct intel_dp *intel_dp) |
1410 | { | |
1411 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1412 | struct drm_dp_aux *aux = &intel_dp->aux; | |
1413 | struct i915_psr *psr = &dev_priv->psr; | |
1414 | u8 val; | |
1415 | int r; | |
1416 | ||
1417 | if (!psr->psr2_enabled) | |
1418 | return; | |
1419 | ||
1420 | r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); | |
1421 | if (r != 1) { | |
1422 | DRM_ERROR("Error reading ALPM status\n"); | |
1423 | return; | |
1424 | } | |
1425 | ||
1426 | if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { | |
1427 | intel_psr_disable_locked(intel_dp); | |
1428 | psr->sink_not_reliable = true; | |
1429 | DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n"); | |
1430 | ||
1431 | /* Clear the error */ | |
1432 | drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); | |
1433 | } | |
1434 | } | |
1435 | ||
ba0af30d JRS |
1436 | static void psr_capability_changed_check(struct intel_dp *intel_dp) |
1437 | { | |
1438 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1439 | struct i915_psr *psr = &dev_priv->psr; | |
1440 | u8 val; | |
1441 | int r; | |
1442 | ||
1443 | r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); | |
1444 | if (r != 1) { | |
1445 | DRM_ERROR("Error reading DP_PSR_ESI\n"); | |
1446 | return; | |
1447 | } | |
1448 | ||
1449 | if (val & DP_PSR_CAPS_CHANGE) { | |
1450 | intel_psr_disable_locked(intel_dp); | |
1451 | psr->sink_not_reliable = true; | |
1452 | DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n"); | |
1453 | ||
1454 | /* Clear the DP_PSR_CAPS_CHANGE bit */ | |
1455 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); | |
1456 | } | |
1457 | } | |
1458 | ||
cc3054ff JRS |
1459 | void intel_psr_short_pulse(struct intel_dp *intel_dp) |
1460 | { | |
1895759e | 1461 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
cc3054ff | 1462 | struct i915_psr *psr = &dev_priv->psr; |
95851205 | 1463 | u8 status, error_status; |
93bf76ed | 1464 | const u8 errors = DP_PSR_RFB_STORAGE_ERROR | |
00c8f194 JRS |
1465 | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | |
1466 | DP_PSR_LINK_CRC_ERROR; | |
cc3054ff JRS |
1467 | |
1468 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) | |
1469 | return; | |
1470 | ||
1471 | mutex_lock(&psr->lock); | |
1472 | ||
c44301fc | 1473 | if (!psr->enabled || psr->dp != intel_dp) |
cc3054ff JRS |
1474 | goto exit; |
1475 | ||
95851205 JRS |
1476 | if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { |
1477 | DRM_ERROR("Error reading PSR status or error status\n"); | |
cc3054ff JRS |
1478 | goto exit; |
1479 | } | |
1480 | ||
95851205 | 1481 | if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { |
cc3054ff | 1482 | intel_psr_disable_locked(intel_dp); |
50a12d8f | 1483 | psr->sink_not_reliable = true; |
cc3054ff JRS |
1484 | } |
1485 | ||
95851205 JRS |
1486 | if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) |
1487 | DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); | |
1488 | if (error_status & DP_PSR_RFB_STORAGE_ERROR) | |
93bf76ed | 1489 | DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); |
95851205 | 1490 | if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) |
93bf76ed | 1491 | DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); |
95851205 | 1492 | if (error_status & DP_PSR_LINK_CRC_ERROR) |
5063f48b | 1493 | DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n"); |
93bf76ed | 1494 | |
95851205 | 1495 | if (error_status & ~errors) |
93bf76ed | 1496 | DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", |
95851205 | 1497 | error_status & ~errors); |
93bf76ed | 1498 | /* clear status register */ |
95851205 | 1499 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); |
700355af JRS |
1500 | |
1501 | psr_alpm_check(intel_dp); | |
ba0af30d | 1502 | psr_capability_changed_check(intel_dp); |
700355af | 1503 | |
cc3054ff JRS |
1504 | exit: |
1505 | mutex_unlock(&psr->lock); | |
1506 | } | |
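/*
 * [Editor's sketch, not part of the driver] A compile-time restatement
 * of the fatal-error policy above: a sink internal error or any of the
 * listed PSR_ERROR_STATUS bits disables PSR and marks the sink as not
 * reliable. example_psr_error_is_fatal() is hypothetical.
 */
static bool example_psr_error_is_fatal(u8 status, u8 error_status)
{
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	return status == DP_PSR_SINK_INTERNAL_ERROR ||
	       (error_status & errors);
}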
2f8e7ea9 JRS |
1507 | |
1508 | bool intel_psr_enabled(struct intel_dp *intel_dp) | |
1509 | { | |
1510 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1511 | bool ret; | |
1512 | ||
1513 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) | |
1514 | return false; | |
1515 | ||
1516 | mutex_lock(&dev_priv->psr.lock); | |
1517 | ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); | |
1518 | mutex_unlock(&dev_priv->psr.lock); | |
1519 | ||
1520 | return ret; | |
1521 | } | |
60c6a14b JRS |
1522 | |
1523 | void intel_psr_atomic_check(struct drm_connector *connector, | |
1524 | struct drm_connector_state *old_state, | |
1525 | struct drm_connector_state *new_state) | |
1526 | { | |
1527 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | |
1528 | struct intel_connector *intel_connector; | |
1529 | struct intel_digital_port *dig_port; | |
1530 | struct drm_crtc_state *crtc_state; | |
1531 | ||
1532 | if (!CAN_PSR(dev_priv) || !new_state->crtc || | |
1533 | dev_priv->psr.initially_probed) | |
1534 | return; | |
1535 | ||
1536 | intel_connector = to_intel_connector(connector); | |
fa7edcd2 | 1537 | dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector)); |
60c6a14b JRS |
1538 | if (dev_priv->psr.dp != &dig_port->dp) |
1539 | return; | |
1540 | ||
1541 | crtc_state = drm_atomic_get_new_crtc_state(new_state->state, | |
1542 | new_state->crtc); | |
1543 | crtc_state->mode_changed = true; | |
1544 | dev_priv->psr.initially_probed = true; | |
1545 | } |
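/*
 * [Editor's sketch, not part of the driver] The shape of a caller for
 * the hook above: a connector atomic_check implementation passes the
 * old and new connector states through so PSR can force a modeset on
 * the initially probed output. example_connector_atomic_check() is
 * hypothetical; in i915 the call is made from the digital connector
 * atomic_check path.
 */
static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_atomic_state *state)
{
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);

	intel_psr_atomic_check(connector, old_state, new_state);

	return 0;
}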