Commit | Line | Data |
---|---|---|
0bc12bcb RV |
1 | /* |
2 | * Copyright © 2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | */ | |
23 | ||
55367a27 JN |
24 | #include <drm/drm_atomic_helper.h> |
25 | ||
379bc100 JN |
26 | #include "display/intel_dp.h" |
27 | ||
55367a27 | 28 | #include "i915_drv.h" |
3558cafc | 29 | #include "intel_atomic.h" |
1d455f8d | 30 | #include "intel_display_types.h" |
55367a27 | 31 | #include "intel_psr.h" |
f9a79f9a | 32 | #include "intel_sprite.h" |
7a00e68b | 33 | #include "intel_hdmi.h" |
55367a27 | 34 | |
b2b89f55 RV |
35 | /** |
36 | * DOC: Panel Self Refresh (PSR/SRD) | |
37 | * | |
38 | * Since Haswell the Display controller supports Panel Self-Refresh on display |
39 | * panels which have a remote frame buffer (RFB) implemented according to the |
40 | * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower |
41 | * standby states when the system is idle but the display is on, as it |
42 | * eliminates display refresh requests to DDR memory completely as long as the |
43 | * frame buffer for that display is unchanged. |
44 | * | |
45 | * Panel Self Refresh must be supported by both Hardware (source) and | |
46 | * Panel (sink). | |
47 | * | |
48 | * PSR saves power by caching the framebuffer in the panel RFB, which allows us | |
49 | * to power down the link and memory controller. For DSI panels the same idea | |
50 | * is called "manual mode". | |
51 | * | |
52 | * The implementation uses the hardware-based PSR support which automatically | |
53 | * enters/exits self-refresh mode. The hardware takes care of sending the | |
54 | * required DP aux message and could even retrain the link (that part isn't | |
55 | * enabled yet though). The hardware also keeps track of any frontbuffer | |
56 | * changes to know when to exit self-refresh mode again. Unfortunately that | |
57 | * part doesn't work too well, hence why the i915 PSR support uses the | |
58 | * software frontbuffer tracking to make sure it doesn't miss a screen | |
59 | * update. For this integration intel_psr_invalidate() and intel_psr_flush() | |
60 | * get called by the frontbuffer tracking code. Note that because of locking | |
61 | * issues the self-refresh re-enable code is done from a work queue, which | |
62 | * must be correctly synchronized/cancelled when shutting down the pipe." | |
ceaaf530 JRS |
63 | * |
64 | * DC3CO (DC3 clock off) | |
65 | * | |
66 | * On top of PSR2, GEN12 adds an intermediate power saving state that turns |
67 | * the clock off automatically during PSR2 idle state. |
68 | * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep |
69 | * entry/exit allows the HW to enter a low-power state even when page flipping |
70 | * periodically (for instance in a 30 fps video playback scenario). |
71 | * |
72 | * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in it), |
73 | * DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6 |
74 | * frames. If no other flip occurs and that work runs, DC3CO is disabled and |
75 | * PSR2 is configured to enter deep sleep again, resetting the cycle in case |
76 | * of another flip. |
77 | * Front buffer modifications do not trigger DC3CO activation on purpose as it |
78 | * would bring a lot of complexity and most modern systems will only use page |
79 | * flips. |
b2b89f55 RV |
80 | */ |
81 | ||
58d4ad50 | 82 | static bool psr_global_enabled(struct drm_i915_private *i915) |
c44301fc | 83 | { |
58d4ad50 | 84 | switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) { |
c44301fc | 85 | case I915_PSR_DEBUG_DEFAULT: |
8a25c4be | 86 | return i915->params.enable_psr; |
c44301fc ML |
87 | case I915_PSR_DEBUG_DISABLE: |
88 | return false; | |
89 | default: | |
90 | return true; | |
91 | } | |
92 | } | |
93 | ||
5c90660f | 94 | static bool psr2_global_enabled(struct drm_i915_private *dev_priv) |
2ac45bdd ML |
95 | { |
96 | switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { | |
235ca26f | 97 | case I915_PSR_DEBUG_DISABLE: |
2ac45bdd ML |
98 | case I915_PSR_DEBUG_FORCE_PSR1: |
99 | return false; | |
100 | default: | |
5c90660f | 101 | return true; |
2ac45bdd ML |
102 | } |
103 | } | |
104 | ||
2f3b8712 | 105 | static void psr_irq_control(struct drm_i915_private *dev_priv) |
c0871805 | 106 | { |
8241cfbe JRS |
107 | enum transcoder trans_shift; |
108 | u32 mask, val; | |
109 | i915_reg_t imr_reg; | |
2f3b8712 | 110 | |
8241cfbe JRS |
111 | /* |
112 | * gen12+ has one instance of the PSR registers per transcoder, all using |
113 | * the same bit definitions: handle it as TRANSCODER_EDP to force a |
114 | * 0 shift in the bit definitions |
115 | */ |
116 | if (INTEL_GEN(dev_priv) >= 12) { | |
117 | trans_shift = 0; | |
118 | imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); | |
119 | } else { | |
120 | trans_shift = dev_priv->psr.transcoder; | |
121 | imr_reg = EDP_PSR_IMR; | |
122 | } | |
123 | ||
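| /* Always unmask the error interrupt; the entry/exit interrupts are only unmasked when I915_PSR_DEBUG_IRQ is set */ |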
124 | mask = EDP_PSR_ERROR(trans_shift); | |
2f3b8712 | 125 | if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) |
8241cfbe JRS |
126 | mask |= EDP_PSR_POST_EXIT(trans_shift) | |
127 | EDP_PSR_PRE_ENTRY(trans_shift); | |
2f3b8712 JRS |
128 | |
129 | /* Warning: it is masking/setting reserved bits too */ | |
c51e7138 | 130 | val = intel_de_read(dev_priv, imr_reg); |
8241cfbe | 131 | val &= ~EDP_PSR_TRANS_MASK(trans_shift); |
2f3b8712 | 132 | val |= ~mask; |
c51e7138 | 133 | intel_de_write(dev_priv, imr_reg, val); |
54fd3149 DP |
134 | } |
135 | ||
85f691d3 JN |
136 | static void psr_event_print(struct drm_i915_private *i915, |
137 | u32 val, bool psr2_enabled) | |
bc18b4df | 138 | { |
85f691d3 | 139 | drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val); |
bc18b4df | 140 | if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) |
85f691d3 | 141 | drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n"); |
bc18b4df | 142 | if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled) |
85f691d3 | 143 | drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n"); |
bc18b4df | 144 | if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) |
85f691d3 | 145 | drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n"); |
bc18b4df | 146 | if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) |
85f691d3 | 147 | drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n"); |
bc18b4df | 148 | if (val & PSR_EVENT_GRAPHICS_RESET) |
85f691d3 | 149 | drm_dbg_kms(&i915->drm, "\tGraphics reset\n"); |
bc18b4df | 150 | if (val & PSR_EVENT_PCH_INTERRUPT) |
85f691d3 | 151 | drm_dbg_kms(&i915->drm, "\tPCH interrupt\n"); |
bc18b4df | 152 | if (val & PSR_EVENT_MEMORY_UP) |
85f691d3 | 153 | drm_dbg_kms(&i915->drm, "\tMemory up\n"); |
bc18b4df | 154 | if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) |
85f691d3 | 155 | drm_dbg_kms(&i915->drm, "\tFront buffer modification\n"); |
bc18b4df | 156 | if (val & PSR_EVENT_WD_TIMER_EXPIRE) |
85f691d3 | 157 | drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n"); |
bc18b4df | 158 | if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) |
85f691d3 | 159 | drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n"); |
bc18b4df | 160 | if (val & PSR_EVENT_REGISTER_UPDATE) |
85f691d3 | 161 | drm_dbg_kms(&i915->drm, "\tRegister updated\n"); |
bc18b4df | 162 | if (val & PSR_EVENT_HDCP_ENABLE) |
85f691d3 | 163 | drm_dbg_kms(&i915->drm, "\tHDCP enabled\n"); |
bc18b4df | 164 | if (val & PSR_EVENT_KVMR_SESSION_ENABLE) |
85f691d3 | 165 | drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n"); |
bc18b4df | 166 | if (val & PSR_EVENT_VBI_ENABLE) |
85f691d3 | 167 | drm_dbg_kms(&i915->drm, "\tVBI enabled\n"); |
bc18b4df | 168 | if (val & PSR_EVENT_LPSP_MODE_EXIT) |
85f691d3 | 169 | drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n"); |
bc18b4df | 170 | if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled) |
85f691d3 | 171 | drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); |
bc18b4df JRS |
172 | } |
173 | ||
54fd3149 DP |
174 | void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) |
175 | { | |
2f3b8712 | 176 | enum transcoder cpu_transcoder = dev_priv->psr.transcoder; |
8241cfbe JRS |
177 | enum transcoder trans_shift; |
178 | i915_reg_t imr_reg; | |
3f983e54 | 179 | ktime_t time_ns = ktime_get(); |
c0871805 | 180 | |
8241cfbe JRS |
181 | if (INTEL_GEN(dev_priv) >= 12) { |
182 | trans_shift = 0; | |
183 | imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); | |
184 | } else { | |
185 | trans_shift = dev_priv->psr.transcoder; | |
186 | imr_reg = EDP_PSR_IMR; | |
187 | } | |
188 | ||
189 | if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { | |
2f3b8712 | 190 | dev_priv->psr.last_entry_attempt = time_ns; |
6471bd74 WK |
191 | drm_dbg_kms(&dev_priv->drm, |
192 | "[transcoder %s] PSR entry attempt in 2 vblanks\n", | |
193 | transcoder_name(cpu_transcoder)); | |
2f3b8712 | 194 | } |
183b8e67 | 195 | |
8241cfbe | 196 | if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { |
2f3b8712 | 197 | dev_priv->psr.last_exit = time_ns; |
6471bd74 WK |
198 | drm_dbg_kms(&dev_priv->drm, |
199 | "[transcoder %s] PSR exit completed\n", | |
200 | transcoder_name(cpu_transcoder)); | |
183b8e67 | 201 | |
2f3b8712 | 202 | if (INTEL_GEN(dev_priv) >= 9) { |
c51e7138 JN |
203 | u32 val = intel_de_read(dev_priv, |
204 | PSR_EVENT(cpu_transcoder)); | |
2f3b8712 | 205 | bool psr2_enabled = dev_priv->psr.psr2_enabled; |
54fd3149 | 206 | |
c51e7138 JN |
207 | intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), |
208 | val); | |
85f691d3 | 209 | psr_event_print(dev_priv, val, psr2_enabled); |
3f983e54 | 210 | } |
2f3b8712 | 211 | } |
54fd3149 | 212 | |
8241cfbe | 213 | if (psr_iir & EDP_PSR_ERROR(trans_shift)) { |
2f3b8712 | 214 | u32 val; |
bc18b4df | 215 | |
6471bd74 | 216 | drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", |
2f3b8712 | 217 | transcoder_name(cpu_transcoder)); |
bc18b4df | 218 | |
2f3b8712 | 219 | dev_priv->psr.irq_aux_error = true; |
183b8e67 | 220 | |
2f3b8712 JRS |
221 | /* |
222 | * If this interrupt is not masked it will keep |
223 | * firing so fast that it prevents the scheduled |
224 | * work from running. |
225 | * Also, after a PSR error we don't want to arm PSR |
226 | * again, so we don't care about unmasking the |
227 | * interrupt or clearing irq_aux_error. |
228 | */ |
c51e7138 | 229 | val = intel_de_read(dev_priv, imr_reg); |
8241cfbe | 230 | val |= EDP_PSR_ERROR(trans_shift); |
c51e7138 | 231 | intel_de_write(dev_priv, imr_reg, val); |
183b8e67 JRS |
232 | |
233 | schedule_work(&dev_priv->psr.work); | |
234 | } | |
54fd3149 DP |
235 | } |
236 | ||
77fe36ff DP |
237 | static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) |
238 | { | |
739f3abd | 239 | u8 alpm_caps = 0; |
77fe36ff DP |
240 | |
241 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, | |
242 | &alpm_caps) != 1) | |
243 | return false; | |
244 | return alpm_caps & DP_ALPM_CAP; | |
245 | } | |
246 | ||
26e5378d JRS |
247 | static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) |
248 | { | |
85f691d3 | 249 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
264ff016 | 250 | u8 val = 8; /* assume the worst if we can't read the value */ |
26e5378d JRS |
251 | |
252 | if (drm_dp_dpcd_readb(&intel_dp->aux, | |
253 | DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) | |
254 | val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; | |
255 | else | |
85f691d3 JN |
256 | drm_dbg_kms(&i915->drm, |
257 | "Unable to get sink synchronization latency, assuming 8 frames\n"); | |
26e5378d JRS |
258 | return val; |
259 | } | |
260 | ||
8c0d2c29 JRS |
261 | static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) |
262 | { | |
85f691d3 | 263 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
8c0d2c29 JRS |
264 | u16 val; |
265 | ssize_t r; | |
266 | ||
267 | /* |
268 | * Return the default X granularity if granularity is not required or |
269 | * if the DPCD read fails |
270 | */ |
271 | if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) | |
272 | return 4; | |
273 | ||
274 | r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); | |
275 | if (r != 2) | |
85f691d3 JN |
276 | drm_dbg_kms(&i915->drm, |
277 | "Unable to read DP_PSR2_SU_X_GRANULARITY\n"); | |
8c0d2c29 JRS |
278 | |
279 | /* | |
280 | * Spec says that if the value read is 0 the default granularity should | |
281 | * be used instead. | |
282 | */ | |
283 | if (r != 2 || val == 0) | |
284 | val = 4; | |
285 | ||
286 | return val; | |
287 | } | |
288 | ||
77fe36ff DP |
289 | void intel_psr_init_dpcd(struct intel_dp *intel_dp) |
290 | { | |
291 | struct drm_i915_private *dev_priv = | |
292 | to_i915(dp_to_dig_port(intel_dp)->base.base.dev); | |
293 | ||
6056517a | 294 | if (dev_priv->psr.dp) { |
6471bd74 WK |
295 | drm_warn(&dev_priv->drm, |
296 | "More than one eDP panel found, PSR support should be extended\n"); | |
6056517a JRS |
297 | return; |
298 | } | |
299 | ||
77fe36ff DP |
300 | drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, |
301 | sizeof(intel_dp->psr_dpcd)); | |
302 | ||
8cf6da7e DP |
303 | if (!intel_dp->psr_dpcd[0]) |
304 | return; | |
6471bd74 WK |
305 | drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", |
306 | intel_dp->psr_dpcd[0]); | |
84bb2916 | 307 | |
0883ce81 | 308 | if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) { |
6471bd74 WK |
309 | drm_dbg_kms(&dev_priv->drm, |
310 | "PSR support not currently available for this panel\n"); | |
7c5c641a JRS |
311 | return; |
312 | } | |
313 | ||
84bb2916 | 314 | if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { |
6471bd74 WK |
315 | drm_dbg_kms(&dev_priv->drm, |
316 | "Panel lacks power state control, PSR cannot be enabled\n"); | |
84bb2916 DP |
317 | return; |
318 | } | |
7c5c641a | 319 | |
8cf6da7e | 320 | dev_priv->psr.sink_support = true; |
a3db1428 DP |
321 | dev_priv->psr.sink_sync_latency = |
322 | intel_dp_get_sink_sync_latency(intel_dp); | |
77fe36ff | 323 | |
c44301fc ML |
324 | dev_priv->psr.dp = intel_dp; |
325 | ||
77fe36ff | 326 | if (INTEL_GEN(dev_priv) >= 9 && |
aee3bac0 | 327 | (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { |
97c9de66 DP |
328 | bool y_req = intel_dp->psr_dpcd[1] & |
329 | DP_PSR2_SU_Y_COORDINATE_REQUIRED; | |
330 | bool alpm = intel_dp_get_alpm_status(intel_dp); | |
331 | ||
aee3bac0 JRS |
332 | /* |
333 | * All panels that support PSR version 03h (PSR2 + |
334 | * Y-coordinate) can handle Y-coordinates in the VSC, but we |
335 | * are only sure it is going to be used when required by the |
336 | * panel. This way the panel is capable of doing a selective |
337 | * update without an AUX frame sync. |
338 | * |
339 | * To support PSR version 02h and PSR version 03h panels |
340 | * without the Y-coordinate requirement we would need to |
341 | * enable GTC first. |
342 | */ |
97c9de66 | 343 | dev_priv->psr.sink_psr2_support = y_req && alpm; |
6471bd74 WK |
344 | drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", |
345 | dev_priv->psr.sink_psr2_support ? "" : "not "); | |
77fe36ff | 346 | |
95f28d2e | 347 | if (dev_priv->psr.sink_psr2_support) { |
77fe36ff DP |
348 | dev_priv->psr.colorimetry_support = |
349 | intel_dp_get_colorimetry_status(intel_dp); | |
8c0d2c29 JRS |
350 | dev_priv->psr.su_x_granularity = |
351 | intel_dp_get_su_x_granulartiy(intel_dp); | |
77fe36ff DP |
352 | } |
353 | } | |
354 | } | |
355 | ||
b90eed08 | 356 | static void hsw_psr_setup_aux(struct intel_dp *intel_dp) |
0bc12bcb | 357 | { |
1895759e | 358 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
d544e918 DP |
359 | u32 aux_clock_divider, aux_ctl; |
360 | int i; | |
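| /* AUX message the PSR hardware sends on its own: a native AUX write of DP_SET_POWER = D0, used to wake the sink when exiting PSR */ |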
739f3abd | 361 | static const u8 aux_msg[] = { |
0bc12bcb RV |
362 | [0] = DP_AUX_NATIVE_WRITE << 4, |
363 | [1] = DP_SET_POWER >> 8, | |
364 | [2] = DP_SET_POWER & 0xff, | |
365 | [3] = 1 - 1, | |
366 | [4] = DP_SET_POWER_D0, | |
367 | }; | |
d544e918 DP |
368 | u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | |
369 | EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | | |
370 | EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | | |
371 | EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; | |
0bc12bcb RV |
372 | |
373 | BUILD_BUG_ON(sizeof(aux_msg) > 20); | |
b90eed08 | 374 | for (i = 0; i < sizeof(aux_msg); i += 4) |
c51e7138 JN |
375 | intel_de_write(dev_priv, |
376 | EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), | |
377 | intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); | |
b90eed08 | 378 | |
d544e918 DP |
379 | aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); |
380 | ||
381 | /* Start with bits set for DDI_AUX_CTL register */ | |
8a29c778 | 382 | aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), |
b90eed08 | 383 | aux_clock_divider); |
d544e918 DP |
384 | |
385 | /* Select only valid bits for SRD_AUX_CTL */ | |
386 | aux_ctl &= psr_aux_mask; | |
c51e7138 JN |
387 | intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), |
388 | aux_ctl); | |
b90eed08 DP |
389 | } |
390 | ||
cf5d862d | 391 | static void intel_psr_enable_sink(struct intel_dp *intel_dp) |
b90eed08 | 392 | { |
1895759e | 393 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
4df4925b | 394 | u8 dpcd_val = DP_PSR_ENABLE; |
b90eed08 | 395 | |
340c93c0 | 396 | /* Enable ALPM at sink for psr2 */ |
97c9de66 DP |
397 | if (dev_priv->psr.psr2_enabled) { |
398 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, | |
700355af JRS |
399 | DP_ALPM_ENABLE | |
400 | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); | |
401 | ||
98751b8c | 402 | dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; |
60cae442 JRS |
403 | } else { |
404 | if (dev_priv->psr.link_standby) | |
405 | dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; | |
de570946 JRS |
406 | |
407 | if (INTEL_GEN(dev_priv) >= 8) | |
408 | dpcd_val |= DP_PSR_CRC_VERIFICATION; | |
97c9de66 DP |
409 | } |
410 | ||
4df4925b | 411 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); |
6f32ea7e | 412 | |
d544e918 | 413 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); |
0bc12bcb RV |
414 | } |
415 | ||
1e0c05c0 | 416 | static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) |
0bc12bcb | 417 | { |
1895759e | 418 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1e0c05c0 | 419 | u32 val = 0; |
60e5ffe3 | 420 | |
8a9a5608 JRS |
421 | if (INTEL_GEN(dev_priv) >= 11) |
422 | val |= EDP_PSR_TP4_TIME_0US; | |
423 | ||
8a25c4be | 424 | if (dev_priv->params.psr_safest_params) { |
2d387995 JRS |
425 | val |= EDP_PSR_TP1_TIME_2500us; |
426 | val |= EDP_PSR_TP2_TP3_TIME_2500us; | |
427 | goto check_tp3_sel; | |
428 | } | |
429 | ||
77312ae8 | 430 | if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) |
1e0c05c0 | 431 | val |= EDP_PSR_TP1_TIME_0us; |
77312ae8 | 432 | else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) |
50db1390 | 433 | val |= EDP_PSR_TP1_TIME_100us; |
77312ae8 VN |
434 | else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) |
435 | val |= EDP_PSR_TP1_TIME_500us; | |
50db1390 | 436 | else |
77312ae8 | 437 | val |= EDP_PSR_TP1_TIME_2500us; |
50db1390 | 438 | |
77312ae8 | 439 | if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) |
1e0c05c0 | 440 | val |= EDP_PSR_TP2_TP3_TIME_0us; |
77312ae8 | 441 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) |
50db1390 | 442 | val |= EDP_PSR_TP2_TP3_TIME_100us; |
77312ae8 VN |
443 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) |
444 | val |= EDP_PSR_TP2_TP3_TIME_500us; | |
50db1390 | 445 | else |
77312ae8 | 446 | val |= EDP_PSR_TP2_TP3_TIME_2500us; |
50db1390 | 447 | |
2d387995 | 448 | check_tp3_sel: |
50db1390 DV |
449 | if (intel_dp_source_supports_hbr2(intel_dp) && |
450 | drm_dp_tps3_supported(intel_dp->dpcd)) | |
451 | val |= EDP_PSR_TP1_TP3_SEL; | |
452 | else | |
453 | val |= EDP_PSR_TP1_TP2_SEL; | |
454 | ||
1e0c05c0 JRS |
455 | return val; |
456 | } | |
457 | ||
9e83713a | 458 | static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) |
1e0c05c0 JRS |
459 | { |
460 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
9e83713a | 461 | int idle_frames; |
1e0c05c0 JRS |
462 | |
463 | /* Let's use 6 as the minimum to cover all known cases including the | |
464 | * off-by-one issue that HW has in some cases. | |
465 | */ | |
9e83713a | 466 | idle_frames = max(6, dev_priv->vbt.psr.idle_frames); |
1e0c05c0 | 467 | idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); |
9e83713a | 468 | |
16c56083 | 469 | if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) |
9e83713a JRS |
470 | idle_frames = 0xf; |
471 | ||
472 | return idle_frames; | |
473 | } | |
474 | ||
475 | static void hsw_activate_psr1(struct intel_dp *intel_dp) | |
476 | { | |
477 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
478 | u32 max_sleep_time = 0x1f; | |
479 | u32 val = EDP_PSR_ENABLE; | |
480 | ||
481 | val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT; | |
1e0c05c0 JRS |
482 | |
483 | val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; | |
484 | if (IS_HASWELL(dev_priv)) | |
485 | val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | |
486 | ||
487 | if (dev_priv->psr.link_standby) | |
488 | val |= EDP_PSR_LINK_STANDBY; | |
489 | ||
490 | val |= intel_psr1_get_tp_time(intel_dp); | |
491 | ||
00c8f194 JRS |
492 | if (INTEL_GEN(dev_priv) >= 8) |
493 | val |= EDP_PSR_CRC_ENABLE; | |
494 | ||
c51e7138 | 495 | val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & |
4ab4fa10 | 496 | EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); |
c51e7138 | 497 | intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val); |
3fcb0ca1 | 498 | } |
50db1390 | 499 | |
2d387995 | 500 | static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) |
3fcb0ca1 | 501 | { |
1895759e | 502 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
2d387995 | 503 | u32 val = 0; |
977da084 | 504 | |
8a25c4be | 505 | if (dev_priv->params.psr_safest_params) |
2d387995 | 506 | return EDP_PSR2_TP2_TIME_2500us; |
50db1390 | 507 | |
88a0d960 JRS |
508 | if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && |
509 | dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) | |
77312ae8 | 510 | val |= EDP_PSR2_TP2_TIME_50us; |
88a0d960 | 511 | else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) |
77312ae8 | 512 | val |= EDP_PSR2_TP2_TIME_100us; |
88a0d960 | 513 | else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) |
77312ae8 | 514 | val |= EDP_PSR2_TP2_TIME_500us; |
50db1390 | 515 | else |
77312ae8 | 516 | val |= EDP_PSR2_TP2_TIME_2500us; |
474d1ec4 | 517 | |
2d387995 JRS |
518 | return val; |
519 | } | |
520 | ||
521 | static void hsw_activate_psr2(struct intel_dp *intel_dp) | |
522 | { | |
523 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
524 | u32 val; | |
525 | ||
526 | val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT; | |
527 | ||
528 | val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; | |
529 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) | |
530 | val |= EDP_Y_COORDINATE_ENABLE; | |
531 | ||
532 | val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); | |
533 | val |= intel_psr2_get_tp_time(intel_dp); | |
534 | ||
64cf40a1 GM |
535 | if (INTEL_GEN(dev_priv) >= 12) { |
536 | /* |
537 | * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the default |
538 | * values from BSpec. For optimal power consumption, modes below |
539 | * 4K resolution need to decrease IO_BUFFER_WAKE and FAST_WAKE, |
540 | * and modes above 4K resolution need to increase IO_BUFFER_WAKE |
541 | * and FAST_WAKE. |
542 | */ |
543 | val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; | |
544 | val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); | |
545 | val |= TGL_EDP_PSR2_FAST_WAKE(7); | |
546 | } else if (INTEL_GEN(dev_priv) >= 9) { | |
547 | val |= EDP_PSR2_IO_BUFFER_WAKE(7); | |
548 | val |= EDP_PSR2_FAST_WAKE(7); | |
549 | } | |
550 | ||
a170f4f1 JRS |
551 | if (dev_priv->psr.psr2_sel_fetch_enabled) { |
552 | /* WA 1408330847 */ | |
c33298cb | 553 | if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || |
a170f4f1 JRS |
554 | IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) |
555 | intel_de_rmw(dev_priv, CHICKEN_PAR1_1, | |
556 | DIS_RAM_BYPASS_PSR2_MAN_TRACK, | |
557 | DIS_RAM_BYPASS_PSR2_MAN_TRACK); | |
558 | ||
6e43e276 JRS |
559 | intel_de_write(dev_priv, |
560 | PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), | |
561 | PSR2_MAN_TRK_CTL_ENABLE); | |
a170f4f1 | 562 | } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { |
6e43e276 JRS |
563 | intel_de_write(dev_priv, |
564 | PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0); | |
a170f4f1 | 565 | } |
6e43e276 | 566 | |
06dd94cc | 567 | /* |
15b7dae0 JRS |
568 | * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL and BSpec |
569 | * recommends keeping this bit unset while PSR2 is enabled. |
06dd94cc | 570 | */ |
c51e7138 | 571 | intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0); |
06dd94cc | 572 | |
c51e7138 | 573 | intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); |
0bc12bcb RV |
574 | } |
575 | ||
99fc38b1 JRS |
576 | static bool |
577 | transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) | |
578 | { | |
0f81e645 JRS |
579 | if (INTEL_GEN(dev_priv) < 9) |
580 | return false; | |
581 | else if (INTEL_GEN(dev_priv) >= 12) | |
99fc38b1 JRS |
582 | return trans == TRANSCODER_A; |
583 | else | |
584 | return trans == TRANSCODER_EDP; | |
585 | } | |
586 | ||
1c4d821d AG |
587 | static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) |
588 | { | |
1326a92c | 589 | if (!cstate || !cstate->hw.active) |
1c4d821d AG |
590 | return 0; |
591 | ||
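| /* Frame time in microseconds: 1,000,000 / vertical refresh rate, rounded up */ |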
592 | return DIV_ROUND_UP(1000 * 1000, | |
1326a92c | 593 | drm_mode_vrefresh(&cstate->hw.adjusted_mode)); |
1c4d821d AG |
594 | } |
595 | ||
596 | static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, | |
597 | u32 idle_frames) | |
598 | { | |
599 | u32 val; | |
600 | ||
601 | idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; | |
c51e7138 | 602 | val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)); |
1c4d821d AG |
603 | val &= ~EDP_PSR2_IDLE_FRAME_MASK; |
604 | val |= idle_frames; | |
c51e7138 | 605 | intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); |
1c4d821d AG |
606 | } |
607 | ||
608 | static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) | |
609 | { | |
610 | psr2_program_idle_frames(dev_priv, 0); | |
611 | intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); | |
612 | } | |
613 | ||
614 | static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv) | |
615 | { | |
9e83713a | 616 | struct intel_dp *intel_dp = dev_priv->psr.dp; |
1c4d821d AG |
617 | |
618 | intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); | |
9e83713a | 619 | psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp)); |
1c4d821d AG |
620 | } |
621 | ||
ceaaf530 | 622 | static void tgl_dc3co_disable_work(struct work_struct *work) |
1c4d821d AG |
623 | { |
624 | struct drm_i915_private *dev_priv = | |
ceaaf530 | 625 | container_of(work, typeof(*dev_priv), psr.dc3co_work.work); |
1c4d821d AG |
626 | |
627 | mutex_lock(&dev_priv->psr.lock); | |
628 | /* If delayed work is pending, it is not idle */ | |
ceaaf530 | 629 | if (delayed_work_pending(&dev_priv->psr.dc3co_work)) |
1c4d821d AG |
630 | goto unlock; |
631 | ||
1c4d821d AG |
632 | tgl_psr2_disable_dc3co(dev_priv); |
633 | unlock: | |
634 | mutex_unlock(&dev_priv->psr.lock); | |
635 | } | |
636 | ||
637 | static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv) | |
638 | { | |
639 | if (!dev_priv->psr.dc3co_enabled) | |
640 | return; | |
641 | ||
ceaaf530 | 642 | cancel_delayed_work(&dev_priv->psr.dc3co_work); |
1c4d821d AG |
643 | /* Before PSR2 exit disallow dc3co*/ |
644 | tgl_psr2_disable_dc3co(dev_priv); | |
645 | } | |
646 | ||
c5c772cf JRS |
647 | static void |
648 | tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, | |
649 | struct intel_crtc_state *crtc_state) | |
650 | { | |
651 | const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; | |
652 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | |
653 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
654 | u32 exit_scanlines; | |
655 | ||
f6119781 GM |
656 | /* |
657 | * DMC's DC3CO exit mechanism has an issue with Selective Fetch. |
658 | * TODO: when the issue is addressed, this restriction should be removed. |
659 | */ | |
660 | if (crtc_state->enable_psr2_sel_fetch) | |
661 | return; | |
662 | ||
c5c772cf JRS |
663 | if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) |
664 | return; | |
665 | ||
666 | /* BSpec: 49196 DC3CO only works with pipe A and DDI A. */ |
667 | if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A || | |
668 | dig_port->base.port != PORT_A) | |
669 | return; | |
670 | ||
671 | /* | |
672 | * DC3CO exit time is 200us, per BSpec 49196 |
673 | * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 | |
674 | */ | |
675 | exit_scanlines = | |
676 | intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; | |
677 | ||
16c56083 | 678 | if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay)) |
c5c772cf JRS |
679 | return; |
680 | ||
681 | crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; | |
682 | } | |
683 | ||
6e43e276 JRS |
684 | static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, |
685 | struct intel_crtc_state *crtc_state) | |
686 | { | |
687 | struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); | |
688 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
689 | struct intel_plane_state *plane_state; | |
690 | struct intel_plane *plane; | |
691 | int i; | |
692 | ||
693 | if (!dev_priv->params.enable_psr2_sel_fetch) { | |
694 | drm_dbg_kms(&dev_priv->drm, | |
695 | "PSR2 sel fetch not enabled, disabled by parameter\n"); | |
696 | return false; | |
697 | } | |
698 | ||
699 | if (crtc_state->uapi.async_flip) { | |
700 | drm_dbg_kms(&dev_priv->drm, | |
701 | "PSR2 sel fetch not enabled, async flip enabled\n"); | |
702 | return false; | |
703 | } | |
704 | ||
705 | for_each_new_intel_plane_in_state(state, plane, plane_state, i) { | |
706 | if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) { | |
707 | drm_dbg_kms(&dev_priv->drm, | |
708 | "PSR2 sel fetch not enabled, plane rotated\n"); | |
709 | return false; | |
710 | } | |
711 | } | |
712 | ||
713 | return crtc_state->enable_psr2_sel_fetch = true; | |
714 | } | |
715 | ||
c4932d79 RV |
716 | static bool intel_psr2_config_valid(struct intel_dp *intel_dp, |
717 | struct intel_crtc_state *crtc_state) | |
718 | { | |
1895759e | 719 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1326a92c ML |
720 | int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; |
721 | int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; | |
f98837e8 | 722 | int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; |
c4932d79 | 723 | |
95f28d2e | 724 | if (!dev_priv->psr.sink_psr2_support) |
c4932d79 RV |
725 | return false; |
726 | ||
99fc38b1 | 727 | if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { |
6471bd74 WK |
728 | drm_dbg_kms(&dev_priv->drm, |
729 | "PSR2 not supported in transcoder %s\n", | |
730 | transcoder_name(crtc_state->cpu_transcoder)); | |
99fc38b1 JRS |
731 | return false; |
732 | } | |
733 | ||
5c90660f JRS |
734 | if (!psr2_global_enabled(dev_priv)) { |
735 | drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n"); | |
736 | return false; | |
737 | } | |
738 | ||
8228c42f MN |
739 | /* |
740 | * DSC and PSR2 cannot be enabled simultaneously. If a requested | |
741 | * resolution requires DSC to be enabled, priority is given to DSC | |
742 | * over PSR2. | |
743 | */ | |
010663a6 | 744 | if (crtc_state->dsc.compression_enable) { |
6471bd74 WK |
745 | drm_dbg_kms(&dev_priv->drm, |
746 | "PSR2 cannot be enabled since DSC is enabled\n"); | |
8228c42f MN |
747 | return false; |
748 | } | |
749 | ||
19167eb0 JRS |
750 | if (crtc_state->crc_enabled) { |
751 | drm_dbg_kms(&dev_priv->drm, | |
752 | "PSR2 not enabled because it would inhibit pipe CRC calculation\n"); | |
753 | return false; | |
754 | } | |
755 | ||
f7b3c226 JRS |
756 | if (INTEL_GEN(dev_priv) >= 12) { |
757 | psr_max_h = 5120; | |
758 | psr_max_v = 3200; | |
f98837e8 | 759 | max_bpp = 30; |
f7b3c226 | 760 | } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { |
c90c275c DP |
761 | psr_max_h = 4096; |
762 | psr_max_v = 2304; | |
f98837e8 | 763 | max_bpp = 24; |
cf819eff | 764 | } else if (IS_GEN(dev_priv, 9)) { |
c90c275c DP |
765 | psr_max_h = 3640; |
766 | psr_max_v = 2304; | |
f98837e8 | 767 | max_bpp = 24; |
c90c275c DP |
768 | } |
769 | ||
f98837e8 | 770 | if (crtc_state->pipe_bpp > max_bpp) { |
6471bd74 WK |
771 | drm_dbg_kms(&dev_priv->drm, |
772 | "PSR2 not enabled, pipe bpp %d > max supported %d\n", | |
773 | crtc_state->pipe_bpp, max_bpp); | |
f98837e8 JRS |
774 | return false; |
775 | } | |
776 | ||
bef5e5b3 JRS |
777 | /* |
778 | * HW sends SU blocks of size four scan lines, which means the starting | |
779 | * X coordinate and Y granularity requirements will always be met. We | |
8c0d2c29 JRS |
780 | * only need to validate that the SU block width is a multiple of |
781 | * the X granularity. |
bef5e5b3 | 782 | */ |
8c0d2c29 | 783 | if (crtc_hdisplay % dev_priv->psr.su_x_granularity) { |
6471bd74 WK |
784 | drm_dbg_kms(&dev_priv->drm, |
785 | "PSR2 not enabled, hdisplay(%d) not multiple of %d\n", | |
786 | crtc_hdisplay, dev_priv->psr.su_x_granularity); | |
bef5e5b3 JRS |
787 | return false; |
788 | } | |
789 | ||
6e43e276 JRS |
790 | if (HAS_PSR2_SEL_FETCH(dev_priv)) { |
791 | if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && | |
792 | !HAS_PSR_HW_TRACKING(dev_priv)) { | |
793 | drm_dbg_kms(&dev_priv->drm, | |
794 | "PSR2 not enabled, selective fetch not valid and no HW tracking available\n"); | |
795 | return false; | |
796 | } | |
19167eb0 JRS |
797 | } |
798 | ||
6e43e276 JRS |
799 | if (!crtc_state->enable_psr2_sel_fetch && |
800 | (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { | |
19167eb0 JRS |
801 | drm_dbg_kms(&dev_priv->drm, |
802 | "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", | |
803 | crtc_hdisplay, crtc_vdisplay, | |
804 | psr_max_h, psr_max_v); | |
618cf883 JRS |
805 | return false; |
806 | } | |
807 | ||
c5c772cf | 808 | tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); |
c4932d79 RV |
809 | return true; |
810 | } | |
811 | ||
4d90f2d5 VS |
812 | void intel_psr_compute_config(struct intel_dp *intel_dp, |
813 | struct intel_crtc_state *crtc_state) | |
0bc12bcb RV |
814 | { |
815 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | |
1895759e | 816 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
dfd2e9ab | 817 | const struct drm_display_mode *adjusted_mode = |
1326a92c | 818 | &crtc_state->hw.adjusted_mode; |
dfd2e9ab | 819 | int psr_setup_time; |
0bc12bcb | 820 | |
4371d896 | 821 | if (!CAN_PSR(dev_priv)) |
4d90f2d5 VS |
822 | return; |
823 | ||
c44301fc | 824 | if (intel_dp != dev_priv->psr.dp) |
4d90f2d5 | 825 | return; |
0bc12bcb | 826 | |
5c90660f JRS |
827 | if (!psr_global_enabled(dev_priv)) { |
828 | drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); | |
7a00e68b | 829 | return; |
5c90660f JRS |
830 | } |
831 | ||
dc9b5a0c RV |
832 | /* |
833 | * HSW spec explicitly says PSR is tied to port A. | |
4ab4fa10 JRS |
834 | * BDW+ platforms have an instance of PSR registers per transcoder, but |
835 | * for now the driver only supports one instance of PSR, so let's keep it |
836 | * hardcoded to PORT_A. |
dc9b5a0c | 837 | */ |
ce3508fd | 838 | if (dig_port->base.port != PORT_A) { |
6471bd74 WK |
839 | drm_dbg_kms(&dev_priv->drm, |
840 | "PSR condition failed: Port not supported\n"); | |
4d90f2d5 | 841 | return; |
0bc12bcb RV |
842 | } |
843 | ||
50a12d8f | 844 | if (dev_priv->psr.sink_not_reliable) { |
6471bd74 WK |
845 | drm_dbg_kms(&dev_priv->drm, |
846 | "PSR sink implementation is not reliable\n"); | |
50a12d8f JRS |
847 | return; |
848 | } | |
849 | ||
7ae6ad6f | 850 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
6471bd74 WK |
851 | drm_dbg_kms(&dev_priv->drm, |
852 | "PSR condition failed: Interlaced mode enabled\n"); | |
4d90f2d5 | 853 | return; |
0bc12bcb RV |
854 | } |
855 | ||
dfd2e9ab VS |
856 | psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); |
857 | if (psr_setup_time < 0) { | |
6471bd74 WK |
858 | drm_dbg_kms(&dev_priv->drm, |
859 | "PSR condition failed: Invalid PSR setup time (0x%02x)\n", | |
860 | intel_dp->psr_dpcd[1]); | |
4d90f2d5 | 861 | return; |
dfd2e9ab VS |
862 | } |
863 | ||
864 | if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > | |
865 | adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { | |
6471bd74 WK |
866 | drm_dbg_kms(&dev_priv->drm, |
867 | "PSR condition failed: PSR setup time (%d us) too long\n", | |
868 | psr_setup_time); | |
4d90f2d5 VS |
869 | return; |
870 | } | |
871 | ||
4d90f2d5 | 872 | crtc_state->has_psr = true; |
c4932d79 | 873 | crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); |
7a00e68b | 874 | crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); |
0bc12bcb RV |
875 | } |
876 | ||
e2bbc343 | 877 | static void intel_psr_activate(struct intel_dp *intel_dp) |
0bc12bcb | 878 | { |
1895759e | 879 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
0bc12bcb | 880 | |
0f81e645 | 881 | if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) |
16c56083 PB |
882 | drm_WARN_ON(&dev_priv->drm, |
883 | intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); | |
0f81e645 | 884 | |
16c56083 PB |
885 | drm_WARN_ON(&dev_priv->drm, |
886 | intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); | |
887 | drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active); | |
0bc12bcb RV |
888 | lockdep_assert_held(&dev_priv->psr.lock); |
889 | ||
cf5d862d RV |
890 | /* psr1 and psr2 are mutually exclusive.*/ |
891 | if (dev_priv->psr.psr2_enabled) | |
892 | hsw_activate_psr2(intel_dp); | |
893 | else | |
894 | hsw_activate_psr1(intel_dp); | |
895 | ||
0bc12bcb RV |
896 | dev_priv->psr.active = true; |
897 | } | |
898 | ||
cf5d862d RV |
899 | static void intel_psr_enable_source(struct intel_dp *intel_dp, |
900 | const struct intel_crtc_state *crtc_state) | |
4d1fa22f | 901 | { |
1895759e | 902 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
4d1fa22f | 903 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
fc6ff9dc | 904 | u32 mask; |
4d1fa22f | 905 | |
d544e918 DP |
906 | /* Only HSW and BDW have PSR AUX registers that need to be set up. |
907 | * SKL+ use hardcoded values for PSR AUX transactions |
908 | */ | |
909 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | |
910 | hsw_psr_setup_aux(intel_dp); | |
911 | ||
cf819eff | 912 | if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && |
d15f9cdd | 913 | !IS_GEMINILAKE(dev_priv))) { |
12c4d4c1 | 914 | i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); |
c51e7138 | 915 | u32 chicken = intel_de_read(dev_priv, reg); |
5e87325f | 916 | |
d15f9cdd JRS |
917 | chicken |= PSR2_VSC_ENABLE_PROG_HEADER | |
918 | PSR2_ADD_VERTICAL_LINE_COUNT; | |
c51e7138 | 919 | intel_de_write(dev_priv, reg, chicken); |
4d1fa22f | 920 | } |
bf80928f JRS |
921 | |
922 | /* | |
923 | * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also |
924 | * mask LPSP to avoid a dependency on other drivers that might block |
925 | * runtime_pm. Besides preventing other HW tracking issues, now we |
926 | * can rely on frontbuffer tracking. |
927 | */ | |
fc6ff9dc JRS |
928 | mask = EDP_PSR_DEBUG_MASK_MEMUP | |
929 | EDP_PSR_DEBUG_MASK_HPD | | |
930 | EDP_PSR_DEBUG_MASK_LPSP | | |
931 | EDP_PSR_DEBUG_MASK_MAX_SLEEP; | |
932 | ||
933 | if (INTEL_GEN(dev_priv) < 11) | |
934 | mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; | |
935 | ||
c51e7138 JN |
936 | intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder), |
937 | mask); | |
df7415bf | 938 | |
2f3b8712 | 939 | psr_irq_control(dev_priv); |
c5c772cf JRS |
940 | |
941 | if (crtc_state->dc3co_exitline) { | |
942 | u32 val; | |
943 | ||
944 | /* | |
945 | * TODO: if future platforms supports DC3CO in more than one | |
946 | * transcoder, EXITLINE will need to be unset when disabling PSR | |
947 | */ | |
ddfa21bc | 948 | val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); |
c5c772cf JRS |
949 | val &= ~EXITLINE_MASK; |
950 | val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT; | |
951 | val |= EXITLINE_ENABLE; | |
ddfa21bc | 952 | intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); |
c5c772cf | 953 | } |
6e43e276 | 954 | |
90a24b9d | 955 | if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) |
6e43e276 JRS |
956 | intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, |
957 | dev_priv->psr.psr2_sel_fetch_enabled ? | |
958 | IGNORE_PSR2_HW_TRACKING : 0); | |
4d1fa22f RV |
959 | } |
960 | ||
c44301fc | 961 | static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, |
7a00e68b GM |
962 | const struct intel_crtc_state *crtc_state, |
963 | const struct drm_connector_state *conn_state) | |
c44301fc ML |
964 | { |
965 | struct intel_dp *intel_dp = dev_priv->psr.dp; | |
7801f3b7 LDM |
966 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
967 | struct intel_encoder *encoder = &dig_port->base; | |
4ab4fa10 | 968 | u32 val; |
c44301fc | 969 | |
16c56083 | 970 | drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled); |
23ec9f52 | 971 | |
5c90660f | 972 | dev_priv->psr.psr2_enabled = crtc_state->has_psr2; |
23ec9f52 | 973 | dev_priv->psr.busy_frontbuffer_bits = 0; |
2225f3c6 | 974 | dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; |
1c4d821d | 975 | dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; |
4ab4fa10 | 976 | dev_priv->psr.transcoder = crtc_state->cpu_transcoder; |
58c34c4c JRS |
977 | /* DC5/DC6 requires at least 6 idle frames */ |
978 | val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); | |
979 | dev_priv->psr.dc3co_exit_delay = val; | |
6e43e276 | 980 | dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; |
4ab4fa10 JRS |
981 | |
982 | /* | |
983 | * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR | |
984 | * will still keep the error set even after the reset done in the | |
985 | * irq_preinstall and irq_uninstall hooks. | |
986 | * Enabling PSR in this situation causes the screen to freeze the |
987 | * first time the PSR HW tries to activate, so let's keep PSR disabled |
988 | * to avoid any rendering problems. |
989 | */ | |
8241cfbe | 990 | if (INTEL_GEN(dev_priv) >= 12) { |
c51e7138 JN |
991 | val = intel_de_read(dev_priv, |
992 | TRANS_PSR_IIR(dev_priv->psr.transcoder)); | |
8241cfbe JRS |
993 | val &= EDP_PSR_ERROR(0); |
994 | } else { | |
c51e7138 | 995 | val = intel_de_read(dev_priv, EDP_PSR_IIR); |
8241cfbe JRS |
996 | val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); |
997 | } | |
4ab4fa10 JRS |
998 | if (val) { |
999 | dev_priv->psr.sink_not_reliable = true; | |
6471bd74 WK |
1000 | drm_dbg_kms(&dev_priv->drm, |
1001 | "PSR interruption error set, not enabling PSR\n"); | |
4ab4fa10 JRS |
1002 | return; |
1003 | } | |
c44301fc | 1004 | |
6471bd74 WK |
1005 | drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", |
1006 | dev_priv->psr.psr2_enabled ? "2" : "1"); | |
7a00e68b GM |
1007 | intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, |
1008 | &dev_priv->psr.vsc); | |
1009 | intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc); | |
c44301fc ML |
1010 | intel_psr_enable_sink(intel_dp); |
1011 | intel_psr_enable_source(intel_dp, crtc_state); | |
1012 | dev_priv->psr.enabled = true; | |
1013 | ||
1014 | intel_psr_activate(intel_dp); | |
1015 | } | |
1016 | ||
b2b89f55 RV |
1017 | /** |
1018 | * intel_psr_enable - Enable PSR | |
1019 | * @intel_dp: Intel DP | |
d2419ffc | 1020 | * @crtc_state: new CRTC state |
7a00e68b | 1021 | * @conn_state: new CONNECTOR state |
b2b89f55 RV |
1022 | * |
1023 | * This function can only be called after the pipe is fully trained and enabled. | |
1024 | */ | |
d2419ffc | 1025 | void intel_psr_enable(struct intel_dp *intel_dp, |
7a00e68b GM |
1026 | const struct intel_crtc_state *crtc_state, |
1027 | const struct drm_connector_state *conn_state) | |
0bc12bcb | 1028 | { |
1895759e | 1029 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
0bc12bcb | 1030 | |
df1a5bfc | 1031 | if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp) |
0bc12bcb | 1032 | return; |
0bc12bcb | 1033 | |
df1a5bfc | 1034 | if (!crtc_state->has_psr) |
c9ef291a DP |
1035 | return; |
1036 | ||
16c56083 | 1037 | drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp); |
c44301fc | 1038 | |
0bc12bcb | 1039 | mutex_lock(&dev_priv->psr.lock); |
7a00e68b | 1040 | intel_psr_enable_locked(dev_priv, crtc_state, conn_state); |
0bc12bcb RV |
1041 | mutex_unlock(&dev_priv->psr.lock); |
1042 | } | |
1043 | ||
26f9ec9a JRS |
1044 | static void intel_psr_exit(struct drm_i915_private *dev_priv) |
1045 | { | |
1046 | u32 val; | |
1047 | ||
b2fc2252 | 1048 | if (!dev_priv->psr.active) { |
0f81e645 | 1049 | if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { |
c51e7138 JN |
1050 | val = intel_de_read(dev_priv, |
1051 | EDP_PSR2_CTL(dev_priv->psr.transcoder)); | |
16c56083 | 1052 | drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); |
4ab4fa10 JRS |
1053 | } |
1054 | ||
c51e7138 JN |
1055 | val = intel_de_read(dev_priv, |
1056 | EDP_PSR_CTL(dev_priv->psr.transcoder)); | |
16c56083 | 1057 | drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); |
4ab4fa10 | 1058 | |
26f9ec9a | 1059 | return; |
b2fc2252 | 1060 | } |
26f9ec9a JRS |
1061 | |
1062 | if (dev_priv->psr.psr2_enabled) { | |
1c4d821d | 1063 | tgl_disallow_dc3co_on_psr2_exit(dev_priv); |
c51e7138 JN |
1064 | val = intel_de_read(dev_priv, |
1065 | EDP_PSR2_CTL(dev_priv->psr.transcoder)); | |
16c56083 | 1066 | drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); |
4ab4fa10 | 1067 | val &= ~EDP_PSR2_ENABLE; |
c51e7138 JN |
1068 | intel_de_write(dev_priv, |
1069 | EDP_PSR2_CTL(dev_priv->psr.transcoder), val); | |
26f9ec9a | 1070 | } else { |
c51e7138 JN |
1071 | val = intel_de_read(dev_priv, |
1072 | EDP_PSR_CTL(dev_priv->psr.transcoder)); | |
16c56083 | 1073 | drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); |
4ab4fa10 | 1074 | val &= ~EDP_PSR_ENABLE; |
c51e7138 JN |
1075 | intel_de_write(dev_priv, |
1076 | EDP_PSR_CTL(dev_priv->psr.transcoder), val); | |
26f9ec9a JRS |
1077 | } |
1078 | dev_priv->psr.active = false; | |
1079 | } | |
1080 | ||
2ee936e3 | 1081 | static void intel_psr_disable_locked(struct intel_dp *intel_dp) |
e2bbc343 | 1082 | { |
1895759e | 1083 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
b2fc2252 JRS |
1084 | i915_reg_t psr_status; |
1085 | u32 psr_status_mask; | |
0bc12bcb | 1086 | |
2ee936e3 JRS |
1087 | lockdep_assert_held(&dev_priv->psr.lock); |
1088 | ||
1089 | if (!dev_priv->psr.enabled) | |
1090 | return; | |
1091 | ||
6471bd74 WK |
1092 | drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", |
1093 | dev_priv->psr.psr2_enabled ? "2" : "1"); | |
2ee936e3 | 1094 | |
b2fc2252 | 1095 | intel_psr_exit(dev_priv); |
77affa31 | 1096 | |
b2fc2252 | 1097 | if (dev_priv->psr.psr2_enabled) { |
4ab4fa10 | 1098 | psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); |
b2fc2252 | 1099 | psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; |
0bc12bcb | 1100 | } else { |
4ab4fa10 | 1101 | psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); |
b2fc2252 | 1102 | psr_status_mask = EDP_PSR_STATUS_STATE_MASK; |
0bc12bcb | 1103 | } |
b2fc2252 JRS |
1104 | |
1105 | /* Wait till PSR is idle */ | |
4cb3b44d DCS |
1106 | if (intel_de_wait_for_clear(dev_priv, psr_status, |
1107 | psr_status_mask, 2000)) | |
6471bd74 | 1108 | drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); |
cc3054ff | 1109 | |
a170f4f1 JRS |
1110 | /* WA 1408330847 */ |
1111 | if (dev_priv->psr.psr2_sel_fetch_enabled && | |
c33298cb | 1112 | (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || |
a170f4f1 JRS |
1113 | IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) |
1114 | intel_de_rmw(dev_priv, CHICKEN_PAR1_1, | |
1115 | DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); | |
1116 | ||
cc3054ff JRS |
1117 | /* Disable PSR on Sink */ |
1118 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); | |
1119 | ||
700355af JRS |
1120 | if (dev_priv->psr.psr2_enabled) |
1121 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); | |
1122 | ||
c44301fc | 1123 | dev_priv->psr.enabled = false; |
cc3054ff JRS |
1124 | } |
1125 | ||
e2bbc343 RV |
1126 | /** |
1127 | * intel_psr_disable - Disable PSR | |
1128 | * @intel_dp: Intel DP | |
d2419ffc | 1129 | * @old_crtc_state: old CRTC state |
e2bbc343 RV |
1130 | * |
1131 | * This function needs to be called before disabling pipe. | |
1132 | */ | |
d2419ffc VS |
1133 | void intel_psr_disable(struct intel_dp *intel_dp, |
1134 | const struct intel_crtc_state *old_crtc_state) | |
e2bbc343 | 1135 | { |
1895759e | 1136 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
e2bbc343 | 1137 | |
4d90f2d5 | 1138 | if (!old_crtc_state->has_psr) |
0f328da6 RV |
1139 | return; |
1140 | ||
16c56083 | 1141 | if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv))) |
c9ef291a DP |
1142 | return; |
1143 | ||
e2bbc343 | 1144 | mutex_lock(&dev_priv->psr.lock); |
c44301fc | 1145 | |
cc3054ff | 1146 | intel_psr_disable_locked(intel_dp); |
c44301fc | 1147 | |
0bc12bcb | 1148 | mutex_unlock(&dev_priv->psr.lock); |
98fa2aec | 1149 | cancel_work_sync(&dev_priv->psr.work); |
ceaaf530 | 1150 | cancel_delayed_work_sync(&dev_priv->psr.dc3co_work); |
0bc12bcb RV |
1151 | } |
1152 | ||
88e05aff JRS |
1153 | static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) |
1154 | { | |
71c1a499 JRS |
1155 | if (IS_TIGERLAKE(dev_priv)) |
1156 | /* | |
1157 | * Writes to CURSURFLIVE in TGL are causing IOMMU errors and | |
1158 | * visual glitches that are often reproduced when executing | |
1159 | * CPU intensive workloads while a eDP 4K panel is attached. | |
1160 | * | |
1161 | * Manually exiting PSR causes the frontbuffer to be updated | |
1162 | * without glitches and the IOMMU errors are also gone but | |
1163 | * this comes at the cost of less time with PSR active. | |
1164 | * | |
1165 | * So using this workaround until this issue is root caused | |
1166 | * and a better fix is found. | |
1167 | */ | |
1168 | intel_psr_exit(dev_priv); | |
1169 | else if (INTEL_GEN(dev_priv) >= 9) | |
381f8a20 JRS |
1170 | /* |
1171 | * Display WA #0884: skl+ | |
1172 | * This documented WA for bxt can be safely applied | |
1173 | * broadly so we can force HW tracking to exit PSR | |
1174 | * instead of disabling and re-enabling. | |
1175 | * The workaround tells us to write 0 to CUR_SURFLIVE_A, |
1176 | * but it makes more sense to write to the currently |
1177 | * active pipe. |
1178 | */ | |
c51e7138 | 1179 | intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0); |
381f8a20 JRS |
1180 | else |
1181 | /* | |
1182 | * A write to CURSURFLIVE does not cause HW tracking to exit PSR |
1183 | * on older gens, so do the manual exit instead. |
1184 | */ | |
1185 | intel_psr_exit(dev_priv); | |
88e05aff JRS |
1186 | } |
1187 | ||
0bcbcba7 JRS |
1188 | void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, |
1189 | const struct intel_crtc_state *crtc_state, | |
1190 | const struct intel_plane_state *plane_state, | |
1191 | int color_plane) | |
1192 | { | |
1193 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | |
1194 | enum pipe pipe = plane->pipe; | |
1195 | u32 val; | |
1196 | ||
1197 | if (!crtc_state->enable_psr2_sel_fetch) | |
1198 | return; | |
1199 | ||
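| /* The cursor plane keeps its full PLANE_CTL value here; all other planes keep only the selective fetch enable bit */ |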
1200 | val = plane_state ? plane_state->ctl : 0; | |
1201 | val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE; | |
1202 | intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val); | |
1203 | if (!val || plane->id == PLANE_CURSOR) | |
1204 | return; | |
1205 | ||
1206 | val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1; | |
1207 | intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); | |
1208 | ||
1209 | val = plane_state->color_plane[color_plane].y << 16; | |
1210 | val |= plane_state->color_plane[color_plane].x; | |
1211 | intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), | |
1212 | val); | |
1213 | ||
1214 | /* Sizes are 0 based */ | |
1215 | val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16; | |
1216 | val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; | |
1217 | intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); | |
1218 | } | |
1219 | ||
6e43e276 JRS |
1220 | void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) |
1221 | { | |
1222 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |
1223 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |
1224 | struct i915_psr *psr = &dev_priv->psr; | |
1225 | ||
1226 | if (!HAS_PSR2_SEL_FETCH(dev_priv) || | |
1227 | !crtc_state->enable_psr2_sel_fetch) | |
1228 | return; | |
1229 | ||
1230 | intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder), | |
1231 | crtc_state->psr2_man_track_ctl); | |
1232 | } | |
1233 | ||
0bcbcba7 JRS |
1234 | static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, |
1235 | struct drm_rect *clip, bool full_update) | |
1236 | { | |
1237 | u32 val = PSR2_MAN_TRK_CTL_ENABLE; | |
1238 | ||
1239 | if (full_update) { | |
1240 | val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; | |
1241 | goto exit; | |
1242 | } | |
1243 | ||
1244 | if (clip->y1 == -1) | |
1245 | goto exit; | |
1246 | ||
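| /* The SU region start/end addresses are tracked in blocks of four scan lines (see the SU block size note in intel_psr2_config_valid()) */ |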
1247 | val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; | |
1248 | val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); | |
1249 | val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1); | |
1250 | exit: | |
1251 | crtc_state->psr2_man_track_ctl = val; | |
1252 | } | |
1253 | ||
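| /* Grow overlap_damage_area so that it also covers damage_area's Y range */ |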
1254 | static void clip_area_update(struct drm_rect *overlap_damage_area, | |
1255 | struct drm_rect *damage_area) | |
1256 | { | |
1257 | if (overlap_damage_area->y1 == -1) { | |
1258 | overlap_damage_area->y1 = damage_area->y1; | |
1259 | overlap_damage_area->y2 = damage_area->y2; | |
1260 | return; | |
1261 | } | |
1262 | ||
1263 | if (damage_area->y1 < overlap_damage_area->y1) | |
1264 | overlap_damage_area->y1 = damage_area->y1; | |
1265 | ||
1266 | if (damage_area->y2 > overlap_damage_area->y2) | |
1267 | overlap_damage_area->y2 = damage_area->y2; | |
1268 | } | |
1269 | ||
1270 | int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, | |
1271 | struct intel_crtc *crtc) | |
6e43e276 JRS |
1272 | { |
1273 | struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); | |
0bcbcba7 JRS |
1274 | struct intel_plane_state *new_plane_state, *old_plane_state; |
1275 | struct drm_rect pipe_clip = { .y1 = -1 }; | |
1276 | struct intel_plane *plane; | |
1277 | bool full_update = false; | |
1278 | int i, ret; | |
6e43e276 JRS |
1279 | |
1280 | if (!crtc_state->enable_psr2_sel_fetch) | |
0bcbcba7 JRS |
1281 | return 0; |
1282 | ||
1283 | ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); | |
1284 | if (ret) | |
1285 | return ret; | |
1286 | ||
1287 | for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, | |
1288 | new_plane_state, i) { | |
1289 | struct drm_rect temp; | |
1290 | ||
1291 | if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) | |
1292 | continue; | |
1293 | ||
1294 | /* | |
1295 | * TODO: it is not clear how to handle planes with a negative position; |
1296 | * planes are also not updated if they have a negative X |
1297 | * position, so for now do a full update in these cases. |
1298 | */ | |
1299 | if (new_plane_state->uapi.dst.y1 < 0 || | |
1300 | new_plane_state->uapi.dst.x1 < 0) { | |
1301 | full_update = true; | |
1302 | break; | |
1303 | } | |
1304 | ||
1305 | if (!new_plane_state->uapi.visible) | |
1306 | continue; | |
1307 | ||
1308 | /* | |
1309 | * For now do a selective fetch of the whole plane area; | |
1310 | * optimizations will come in the future. | |
1311 | */ | |
1312 | temp.y1 = new_plane_state->uapi.dst.y1; | |
1313 | temp.y2 = new_plane_state->uapi.dst.y2; | |
1314 | clip_area_update(&pipe_clip, &temp); | |
1315 | } | |
6e43e276 | 1316 | |
0bcbcba7 JRS |
1317 | psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); |
1318 | return 0; | |
6e43e276 JRS |
1319 | } |
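/*
 * Illustrative note (not part of the original source): clip_area_update()
 * only grows a single y-range, so two damaged planes at, say,
 * y = [0, 100) and y = [300, 400) still yield one selective update region
 * covering y = [0, 400). The hardware tracks a single contiguous region
 * per pipe, so any gap between damaged planes is fetched as well.
 */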
1320 | ||
23ec9f52 JRS |
1321 | /** |
1322 | * intel_psr_update - Update PSR state | |
1323 | * @intel_dp: Intel DP | |
1324 | * @crtc_state: new CRTC state | |
7a00e68b | 1325 | * @conn_state: new CONNECTOR state |
23ec9f52 JRS |
1326 | * |
1327 | * This function will update the PSR state, disabling, enabling or switching the | |
1328 | * PSR version when executing fastsets. For a full modeset, intel_psr_disable() and | |
1329 | * intel_psr_enable() should be called instead. | |
1330 | */ | |
1331 | void intel_psr_update(struct intel_dp *intel_dp, | |
7a00e68b GM |
1332 | const struct intel_crtc_state *crtc_state, |
1333 | const struct drm_connector_state *conn_state) | |
23ec9f52 JRS |
1334 | { |
1335 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1336 | struct i915_psr *psr = &dev_priv->psr; | |
1337 | bool enable, psr2_enable; | |
1338 | ||
1339 | if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) | |
1340 | return; | |
1341 | ||
1342 | mutex_lock(&dev_priv->psr.lock); | |
1343 | ||
5c90660f JRS |
1344 | enable = crtc_state->has_psr; |
1345 | psr2_enable = crtc_state->has_psr2; | |
23ec9f52 | 1346 | |
88e05aff JRS |
1347 | if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { |
1348 | /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ | |
1349 | if (crtc_state->crc_enabled && psr->enabled) | |
1350 | psr_force_hw_tracking_exit(dev_priv); | |
381f8a20 JRS |
1351 | else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) { |
1352 | /* | |
1353 | * Activate PSR again after a force exit when enabling | |
1354 | * CRC in older gens | |
1355 | */ | |
1356 | if (!dev_priv->psr.active && | |
1357 | !dev_priv->psr.busy_frontbuffer_bits) | |
1358 | schedule_work(&dev_priv->psr.work); | |
1359 | } | |
88e05aff | 1360 | |
23ec9f52 | 1361 | goto unlock; |
88e05aff | 1362 | } |
23ec9f52 | 1363 | |
9f952664 JRS |
1364 | if (psr->enabled) |
1365 | intel_psr_disable_locked(intel_dp); | |
23ec9f52 | 1366 | |
9f952664 | 1367 | if (enable) |
7a00e68b | 1368 | intel_psr_enable_locked(dev_priv, crtc_state, conn_state); |
23ec9f52 JRS |
1369 | |
1370 | unlock: | |
1371 | mutex_unlock(&dev_priv->psr.lock); | |
1372 | } | |
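/*
 * Hedged usage sketch (the calling context is illustrative): a fastset
 * commit would update PSR through this helper,
 *
 *	intel_psr_update(intel_dp, crtc_state, conn_state);
 *
 * while a full modeset tears PSR down and brings it back up explicitly via
 * intel_psr_disable() / intel_psr_enable(), as the kerneldoc above notes.
 */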
1373 | ||
65df9c79 DP |
1374 | /** |
1375 | * intel_psr_wait_for_idle - wait for PSR1 to idle | |
1376 | * @new_crtc_state: new CRTC state | |
1377 | * @out_value: PSR status in case of failure | |
1378 | * | |
1379 | * This function is expected to be called from pipe_update_start() where it is | |
1380 | * not expected to race with PSR enable or disable. | |
1381 | * | |
1382 | * Returns: 0 on success or -ETIMEDOUT if the PSR status does not reach idle. | |
1383 | */ | |
63ec132d DP |
1384 | int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, |
1385 | u32 *out_value) | |
c43dbcbb | 1386 | { |
2225f3c6 | 1387 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); |
c3d43361 | 1388 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
c43dbcbb | 1389 | |
c44301fc | 1390 | if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) |
c3d43361 TV |
1391 | return 0; |
1392 | ||
fd255f6e DP |
1393 | /* FIXME: Update this for PSR2 if we need to wait for idle */ |
1394 | if (READ_ONCE(dev_priv->psr.psr2_enabled)) | |
1395 | return 0; | |
c43dbcbb TV |
1396 | |
1397 | /* | |
65df9c79 DP |
1398 | * From bspec: Panel Self Refresh (BDW+) |
1399 | * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of | |
1400 | * exit training time + 1.5 ms of aux channel handshake. 50 ms is | |
1401 | * defensive enough to cover everything. | |
c43dbcbb | 1402 | */ |
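/*
 * Worked example (illustrative): at a 60 Hz refresh rate the budget is
 * roughly 1000 / 60 + 6 + 1.5 ≈ 24.2 ms, so the 50 ms timeout used below
 * leaves ample margin even for slower panels.
 */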
63ec132d | 1403 | |
4ab4fa10 JRS |
1404 | return __intel_wait_for_register(&dev_priv->uncore, |
1405 | EDP_PSR_STATUS(dev_priv->psr.transcoder), | |
fd255f6e | 1406 | EDP_PSR_STATUS_STATE_MASK, |
63ec132d DP |
1407 | EDP_PSR_STATUS_STATE_IDLE, 2, 50, |
1408 | out_value); | |
c43dbcbb TV |
1409 | } |
1410 | ||
1411 | static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) | |
0bc12bcb | 1412 | { |
daeb725e CW |
1413 | i915_reg_t reg; |
1414 | u32 mask; | |
1415 | int err; | |
1416 | ||
c44301fc | 1417 | if (!dev_priv->psr.enabled) |
daeb725e | 1418 | return false; |
0bc12bcb | 1419 | |
ce3508fd | 1420 | if (dev_priv->psr.psr2_enabled) { |
4ab4fa10 | 1421 | reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); |
ce3508fd | 1422 | mask = EDP_PSR2_STATUS_STATE_MASK; |
995d3047 | 1423 | } else { |
4ab4fa10 | 1424 | reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); |
ce3508fd | 1425 | mask = EDP_PSR_STATUS_STATE_MASK; |
0bc12bcb | 1426 | } |
daeb725e CW |
1427 | |
1428 | mutex_unlock(&dev_priv->psr.lock); | |
1429 | ||
4cb3b44d | 1430 | err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); |
daeb725e | 1431 | if (err) |
6471bd74 WK |
1432 | drm_err(&dev_priv->drm, |
1433 | "Timed out waiting for PSR Idle for re-enable\n"); | |
daeb725e CW |
1434 | |
1435 | /* After the unlocked wait, verify that PSR is still wanted! */ | |
0bc12bcb | 1436 | mutex_lock(&dev_priv->psr.lock); |
daeb725e CW |
1437 | return err == 0 && dev_priv->psr.enabled; |
1438 | } | |
0bc12bcb | 1439 | |
23ec9f52 | 1440 | static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) |
2ac45bdd | 1441 | { |
5c90660f | 1442 | struct drm_connector_list_iter conn_iter; |
23ec9f52 JRS |
1443 | struct drm_device *dev = &dev_priv->drm; |
1444 | struct drm_modeset_acquire_ctx ctx; | |
1445 | struct drm_atomic_state *state; | |
5c90660f JRS |
1446 | struct drm_connector *conn; |
1447 | int err = 0; | |
2ac45bdd | 1448 | |
23ec9f52 JRS |
1449 | state = drm_atomic_state_alloc(dev); |
1450 | if (!state) | |
1451 | return -ENOMEM; | |
2ac45bdd | 1452 | |
23ec9f52 JRS |
1453 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); |
1454 | state->acquire_ctx = &ctx; | |
1455 | ||
1456 | retry: | |
23ec9f52 | 1457 | |
5c90660f JRS |
1458 | drm_connector_list_iter_begin(dev, &conn_iter); |
1459 | drm_for_each_connector_iter(conn, &conn_iter) { | |
1460 | struct drm_connector_state *conn_state; | |
1461 | struct drm_crtc_state *crtc_state; | |
1462 | ||
1463 | if (conn->connector_type != DRM_MODE_CONNECTOR_eDP) | |
1464 | continue; | |
1465 | ||
1466 | conn_state = drm_atomic_get_connector_state(state, conn); | |
1467 | if (IS_ERR(conn_state)) { | |
1468 | err = PTR_ERR(conn_state); | |
1469 | break; | |
23ec9f52 JRS |
1470 | } |
1471 | ||
5c90660f JRS |
1472 | if (!conn_state->crtc) |
1473 | continue; | |
1474 | ||
1475 | crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); | |
1476 | if (IS_ERR(crtc_state)) { | |
1477 | err = PTR_ERR(crtc_state); | |
23ec9f52 JRS |
1478 | break; |
1479 | } | |
5c90660f JRS |
1480 | |
1481 | /* Mark mode as changed to trigger a pipe->update() */ | |
1482 | crtc_state->mode_changed = true; | |
23ec9f52 | 1483 | } |
5c90660f | 1484 | drm_connector_list_iter_end(&conn_iter); |
23ec9f52 | 1485 | |
5c90660f JRS |
1486 | if (err == 0) |
1487 | err = drm_atomic_commit(state); | |
2ac45bdd | 1488 | |
23ec9f52 JRS |
1489 | if (err == -EDEADLK) { |
1490 | drm_atomic_state_clear(state); | |
1491 | err = drm_modeset_backoff(&ctx); | |
1492 | if (!err) | |
1493 | goto retry; | |
1494 | } | |
1495 | ||
1496 | drm_modeset_drop_locks(&ctx); | |
1497 | drm_modeset_acquire_fini(&ctx); | |
1498 | drm_atomic_state_put(state); | |
1499 | ||
1500 | return err; | |
2ac45bdd ML |
1501 | } |
1502 | ||
23ec9f52 | 1503 | int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) |
c44301fc | 1504 | { |
23ec9f52 JRS |
1505 | const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; |
1506 | u32 old_mode; | |
c44301fc | 1507 | int ret; |
c44301fc ML |
1508 | |
1509 | if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || | |
2ac45bdd | 1510 | mode > I915_PSR_DEBUG_FORCE_PSR1) { |
6471bd74 | 1511 | drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val); |
c44301fc ML |
1512 | return -EINVAL; |
1513 | } | |
1514 | ||
c44301fc ML |
1515 | ret = mutex_lock_interruptible(&dev_priv->psr.lock); |
1516 | if (ret) | |
1517 | return ret; | |
1518 | ||
23ec9f52 | 1519 | old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; |
c44301fc | 1520 | dev_priv->psr.debug = val; |
2f3b8712 JRS |
1521 | |
1522 | /* | |
1523 | * Do it right away if it's already enabled, otherwise it will be done | |
1524 | * when enabling the source. | |
1525 | */ | |
1526 | if (dev_priv->psr.enabled) | |
1527 | psr_irq_control(dev_priv); | |
c44301fc | 1528 | |
c44301fc | 1529 | mutex_unlock(&dev_priv->psr.lock); |
23ec9f52 JRS |
1530 | |
1531 | if (old_mode != mode) | |
1532 | ret = intel_psr_fastset_force(dev_priv); | |
1533 | ||
c44301fc ML |
1534 | return ret; |
1535 | } | |
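/*
 * Hedged usage note (the debugfs path and numeric value are assumptions,
 * not taken from this file): this helper is typically reached from the
 * i915_edp_psr_debug debugfs entry, where userspace writes a mode from
 * I915_PSR_DEBUG_MODE_MASK optionally ORed with I915_PSR_DEBUG_IRQ, e.g.
 *
 *	echo 0x3 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * to force PSR1, assuming 0x3 maps to I915_PSR_DEBUG_FORCE_PSR1.
 */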
1536 | ||
183b8e67 JRS |
1537 | static void intel_psr_handle_irq(struct drm_i915_private *dev_priv) |
1538 | { | |
1539 | struct i915_psr *psr = &dev_priv->psr; | |
1540 | ||
1541 | intel_psr_disable_locked(psr->dp); | |
1542 | psr->sink_not_reliable = true; | |
1543 | /* let's make sure that the sink is awake */ | |
1544 | drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0); | |
1545 | } | |
1546 | ||
daeb725e CW |
1547 | static void intel_psr_work(struct work_struct *work) |
1548 | { | |
1549 | struct drm_i915_private *dev_priv = | |
5422b37c | 1550 | container_of(work, typeof(*dev_priv), psr.work); |
daeb725e CW |
1551 | |
1552 | mutex_lock(&dev_priv->psr.lock); | |
1553 | ||
5422b37c RV |
1554 | if (!dev_priv->psr.enabled) |
1555 | goto unlock; | |
1556 | ||
183b8e67 JRS |
1557 | if (READ_ONCE(dev_priv->psr.irq_aux_error)) |
1558 | intel_psr_handle_irq(dev_priv); | |
1559 | ||
daeb725e CW |
1560 | /* |
1561 | * We have to make sure PSR is ready for re-enable, | |
1562 | * otherwise it stays disabled until the next full enable/disable cycle. | |
1563 | * PSR might take some time to get fully disabled | |
1564 | * and be ready for re-enable. | |
1565 | */ | |
c43dbcbb | 1566 | if (!__psr_wait_for_idle_locked(dev_priv)) |
0bc12bcb RV |
1567 | goto unlock; |
1568 | ||
1569 | /* | |
1570 | * The delayed work can race with an invalidate hence we need to | |
1571 | * recheck. Since psr_flush first clears this and then reschedules we | |
1572 | * won't ever miss a flush when bailing out here. | |
1573 | */ | |
c12e0643 | 1574 | if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) |
0bc12bcb RV |
1575 | goto unlock; |
1576 | ||
c44301fc | 1577 | intel_psr_activate(dev_priv->psr.dp); |
0bc12bcb RV |
1578 | unlock: |
1579 | mutex_unlock(&dev_priv->psr.lock); | |
1580 | } | |
1581 | ||
b2b89f55 RV |
1582 | /** |
1583 | * intel_psr_invalidate - Invalidate PSR | |
5748b6a1 | 1584 | * @dev_priv: i915 device |
b2b89f55 | 1585 | * @frontbuffer_bits: frontbuffer plane tracking bits |
5baf63cc | 1586 | * @origin: which operation caused the invalidate |
b2b89f55 RV |
1587 | * |
1588 | * Since the hardware frontbuffer tracking has gaps we need to integrate | |
1589 | * with the software frontbuffer tracking. This function gets called every | |
1590 | * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be | |
1591 | * disabled if the frontbuffer mask contains a buffer relevant to PSR. | |
1592 | * | |
1593 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. | |
1594 | */ | |
5748b6a1 | 1595 | void intel_psr_invalidate(struct drm_i915_private *dev_priv, |
5baf63cc | 1596 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
0bc12bcb | 1597 | { |
4371d896 | 1598 | if (!CAN_PSR(dev_priv)) |
0f328da6 RV |
1599 | return; |
1600 | ||
ce3508fd | 1601 | if (origin == ORIGIN_FLIP) |
5baf63cc RV |
1602 | return; |
1603 | ||
0bc12bcb RV |
1604 | mutex_lock(&dev_priv->psr.lock); |
1605 | if (!dev_priv->psr.enabled) { | |
1606 | mutex_unlock(&dev_priv->psr.lock); | |
1607 | return; | |
1608 | } | |
1609 | ||
f0ad62a6 | 1610 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); |
0bc12bcb | 1611 | dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; |
ec76d629 DV |
1612 | |
1613 | if (frontbuffer_bits) | |
5748b6a1 | 1614 | intel_psr_exit(dev_priv); |
ec76d629 | 1615 | |
0bc12bcb RV |
1616 | mutex_unlock(&dev_priv->psr.lock); |
1617 | } | |
1618 | ||
1c4d821d AG |
1619 | /* |
1620 | * When we will be completely rely on PSR2 S/W tracking in future, | |
1621 | * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP | |
1622 | * event also therefore tgl_dc3co_flush() require to be changed | |
ceaaf530 | 1623 | * accordingly in future. |
1c4d821d AG |
1624 | */ |
1625 | static void | |
1626 | tgl_dc3co_flush(struct drm_i915_private *dev_priv, | |
1627 | unsigned int frontbuffer_bits, enum fb_op_origin origin) | |
1628 | { | |
1c4d821d AG |
1629 | mutex_lock(&dev_priv->psr.lock); |
1630 | ||
1631 | if (!dev_priv->psr.dc3co_enabled) | |
1632 | goto unlock; | |
1633 | ||
1634 | if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active) | |
1635 | goto unlock; | |
1636 | ||
1637 | /* | |
1638 | * Every frontbuffer flush flip event pushes back the delayed work; when | |
1639 | * the delayed work finally runs, it means the display has been idle. | |
1640 | */ | |
1641 | if (!(frontbuffer_bits & | |
1642 | INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe))) | |
1643 | goto unlock; | |
1644 | ||
1645 | tgl_psr2_enable_dc3co(dev_priv); | |
ceaaf530 | 1646 | mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work, |
58c34c4c | 1647 | dev_priv->psr.dc3co_exit_delay); |
1c4d821d AG |
1648 | |
1649 | unlock: | |
1650 | mutex_unlock(&dev_priv->psr.lock); | |
1651 | } | |
1652 | ||
b2b89f55 RV |
1653 | /** |
1654 | * intel_psr_flush - Flush PSR | |
5748b6a1 | 1655 | * @dev_priv: i915 device |
b2b89f55 | 1656 | * @frontbuffer_bits: frontbuffer plane tracking bits |
169de131 | 1657 | * @origin: which operation caused the flush |
b2b89f55 RV |
1658 | * |
1659 | * Since the hardware frontbuffer tracking has gaps we need to integrate | |
1660 | * with the software frontbuffer tracking. This function gets called every | |
1661 | * time frontbuffer rendering has completed and flushed out to memory. PSR | |
1662 | * can be enabled again if no other frontbuffer relevant to PSR is dirty. | |
1663 | * | |
1664 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. | |
1665 | */ | |
5748b6a1 | 1666 | void intel_psr_flush(struct drm_i915_private *dev_priv, |
169de131 | 1667 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
0bc12bcb | 1668 | { |
4371d896 | 1669 | if (!CAN_PSR(dev_priv)) |
0f328da6 RV |
1670 | return; |
1671 | ||
1c4d821d AG |
1672 | if (origin == ORIGIN_FLIP) { |
1673 | tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin); | |
5baf63cc | 1674 | return; |
1c4d821d | 1675 | } |
5baf63cc | 1676 | |
0bc12bcb RV |
1677 | mutex_lock(&dev_priv->psr.lock); |
1678 | if (!dev_priv->psr.enabled) { | |
1679 | mutex_unlock(&dev_priv->psr.lock); | |
1680 | return; | |
1681 | } | |
1682 | ||
f0ad62a6 | 1683 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); |
0bc12bcb RV |
1684 | dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; |
1685 | ||
921ec285 | 1686 | /* By definition flush = invalidate + flush */ |
88e05aff JRS |
1687 | if (frontbuffer_bits) |
1688 | psr_force_hw_tracking_exit(dev_priv); | |
995d3047 | 1689 | |
0bc12bcb | 1690 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
5422b37c | 1691 | schedule_work(&dev_priv->psr.work); |
0bc12bcb RV |
1692 | mutex_unlock(&dev_priv->psr.lock); |
1693 | } | |
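/*
 * Hedged usage sketch (the surrounding rendering step is illustrative):
 * the frontbuffer tracking code brackets CPU rendering with an invalidate
 * before the write and a flush once the data has landed, e.g.
 *
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes to the frontbuffer ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *
 * so PSR exits before the screen content changes and is re-enabled, via
 * the work scheduled above, once the frontbuffer goes idle again.
 */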
1694 | ||
b2b89f55 RV |
1695 | /** |
1696 | * intel_psr_init - Init basic PSR work and mutex. | |
93de056b | 1697 | * @dev_priv: i915 device private |
b2b89f55 RV |
1698 | * |
1699 | * This function is called only once at driver load to initialize basic | |
1700 | * PSR stuff. | |
1701 | */ | |
c39055b0 | 1702 | void intel_psr_init(struct drm_i915_private *dev_priv) |
0bc12bcb | 1703 | { |
0f328da6 RV |
1704 | if (!HAS_PSR(dev_priv)) |
1705 | return; | |
1706 | ||
c9ef291a DP |
1707 | if (!dev_priv->psr.sink_support) |
1708 | return; | |
1709 | ||
4ab4fa10 JRS |
1710 | if (IS_HASWELL(dev_priv)) |
1711 | /* | |
1712 | * HSW doesn't have its PSR registers in the same space as the transcoder, | |
1713 | * so set this to a value that, when subtracted from a register offset | |
1714 | * in transcoder space, results in the right offset for HSW. | |
1715 | */ | |
1716 | dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; | |
1717 | ||
8a25c4be | 1718 | if (dev_priv->params.enable_psr == -1) |
598c6cfe | 1719 | if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) |
8a25c4be | 1720 | dev_priv->params.enable_psr = 0; |
d94d6e87 | 1721 | |
65f61b42 | 1722 | /* Set link_standby vs. link_off defaults */ |
8652744b | 1723 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
60e5ffe3 RV |
1724 | /* HSW and BDW require workarounds that we don't implement. */ |
1725 | dev_priv->psr.link_standby = false; | |
99d7a741 JRS |
1726 | else if (INTEL_GEN(dev_priv) < 12) |
1727 | /* For newer platforms up to TGL, respect the VBT again */ | |
60e5ffe3 RV |
1728 | dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; |
1729 | ||
5422b37c | 1730 | INIT_WORK(&dev_priv->psr.work, intel_psr_work); |
ceaaf530 | 1731 | INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work); |
0bc12bcb RV |
1732 | mutex_init(&dev_priv->psr.lock); |
1733 | } | |
cc3054ff | 1734 | |
95851205 JRS |
1735 | static int psr_get_status_and_error_status(struct intel_dp *intel_dp, |
1736 | u8 *status, u8 *error_status) | |
1737 | { | |
1738 | struct drm_dp_aux *aux = &intel_dp->aux; | |
1739 | int ret; | |
1740 | ||
1741 | ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); | |
1742 | if (ret != 1) | |
1743 | return ret; | |
1744 | ||
1745 | ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); | |
1746 | if (ret != 1) | |
1747 | return ret; | |
1748 | ||
1749 | *status = *status & DP_PSR_SINK_STATE_MASK; | |
1750 | ||
1751 | return 0; | |
1752 | } | |
1753 | ||
700355af JRS |
1754 | static void psr_alpm_check(struct intel_dp *intel_dp) |
1755 | { | |
1756 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1757 | struct drm_dp_aux *aux = &intel_dp->aux; | |
1758 | struct i915_psr *psr = &dev_priv->psr; | |
1759 | u8 val; | |
1760 | int r; | |
1761 | ||
1762 | if (!psr->psr2_enabled) | |
1763 | return; | |
1764 | ||
1765 | r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); | |
1766 | if (r != 1) { | |
6471bd74 | 1767 | drm_err(&dev_priv->drm, "Error reading ALPM status\n"); |
700355af JRS |
1768 | return; |
1769 | } | |
1770 | ||
1771 | if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { | |
1772 | intel_psr_disable_locked(intel_dp); | |
1773 | psr->sink_not_reliable = true; | |
6471bd74 WK |
1774 | drm_dbg_kms(&dev_priv->drm, |
1775 | "ALPM lock timeout error, disabling PSR\n"); | |
700355af JRS |
1776 | |
1777 | /* Clearing error */ | |
1778 | drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); | |
1779 | } | |
1780 | } | |
1781 | ||
ba0af30d JRS |
1782 | static void psr_capability_changed_check(struct intel_dp *intel_dp) |
1783 | { | |
1784 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1785 | struct i915_psr *psr = &dev_priv->psr; | |
1786 | u8 val; | |
1787 | int r; | |
1788 | ||
1789 | r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); | |
1790 | if (r != 1) { | |
6471bd74 | 1791 | drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n"); |
ba0af30d JRS |
1792 | return; |
1793 | } | |
1794 | ||
1795 | if (val & DP_PSR_CAPS_CHANGE) { | |
1796 | intel_psr_disable_locked(intel_dp); | |
1797 | psr->sink_not_reliable = true; | |
6471bd74 WK |
1798 | drm_dbg_kms(&dev_priv->drm, |
1799 | "Sink PSR capability changed, disabling PSR\n"); | |
ba0af30d JRS |
1800 | |
1801 | /* Clearing it */ | |
1802 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); | |
1803 | } | |
1804 | } | |
1805 | ||
cc3054ff JRS |
1806 | void intel_psr_short_pulse(struct intel_dp *intel_dp) |
1807 | { | |
1895759e | 1808 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
cc3054ff | 1809 | struct i915_psr *psr = &dev_priv->psr; |
95851205 | 1810 | u8 status, error_status; |
93bf76ed | 1811 | const u8 errors = DP_PSR_RFB_STORAGE_ERROR | |
00c8f194 JRS |
1812 | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | |
1813 | DP_PSR_LINK_CRC_ERROR; | |
cc3054ff JRS |
1814 | |
1815 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) | |
1816 | return; | |
1817 | ||
1818 | mutex_lock(&psr->lock); | |
1819 | ||
c44301fc | 1820 | if (!psr->enabled || psr->dp != intel_dp) |
cc3054ff JRS |
1821 | goto exit; |
1822 | ||
95851205 | 1823 | if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { |
6471bd74 WK |
1824 | drm_err(&dev_priv->drm, |
1825 | "Error reading PSR status or error status\n"); | |
cc3054ff JRS |
1826 | goto exit; |
1827 | } | |
1828 | ||
95851205 | 1829 | if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { |
cc3054ff | 1830 | intel_psr_disable_locked(intel_dp); |
50a12d8f | 1831 | psr->sink_not_reliable = true; |
cc3054ff JRS |
1832 | } |
1833 | ||
95851205 | 1834 | if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) |
6471bd74 WK |
1835 | drm_dbg_kms(&dev_priv->drm, |
1836 | "PSR sink internal error, disabling PSR\n"); | |
95851205 | 1837 | if (error_status & DP_PSR_RFB_STORAGE_ERROR) |
6471bd74 WK |
1838 | drm_dbg_kms(&dev_priv->drm, |
1839 | "PSR RFB storage error, disabling PSR\n"); | |
95851205 | 1840 | if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) |
6471bd74 WK |
1841 | drm_dbg_kms(&dev_priv->drm, |
1842 | "PSR VSC SDP uncorrectable error, disabling PSR\n"); | |
95851205 | 1843 | if (error_status & DP_PSR_LINK_CRC_ERROR) |
6471bd74 WK |
1844 | drm_dbg_kms(&dev_priv->drm, |
1845 | "PSR Link CRC error, disabling PSR\n"); | |
93bf76ed | 1846 | |
95851205 | 1847 | if (error_status & ~errors) |
6471bd74 WK |
1848 | drm_err(&dev_priv->drm, |
1849 | "PSR_ERROR_STATUS unhandled errors %x\n", | |
1850 | error_status & ~errors); | |
93bf76ed | 1851 | /* clear status register */ |
95851205 | 1852 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); |
700355af JRS |
1853 | |
1854 | psr_alpm_check(intel_dp); | |
ba0af30d | 1855 | psr_capability_changed_check(intel_dp); |
700355af | 1856 | |
cc3054ff JRS |
1857 | exit: |
1858 | mutex_unlock(&psr->lock); | |
1859 | } | |
2f8e7ea9 JRS |
1860 | |
1861 | bool intel_psr_enabled(struct intel_dp *intel_dp) | |
1862 | { | |
1863 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | |
1864 | bool ret; | |
1865 | ||
1866 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) | |
1867 | return false; | |
1868 | ||
1869 | mutex_lock(&dev_priv->psr.lock); | |
1870 | ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); | |
1871 | mutex_unlock(&dev_priv->psr.lock); | |
1872 | ||
1873 | return ret; | |
1874 | } |