]>
Commit | Line | Data |
---|---|---|
0bc12bcb RV |
1 | /* |
2 | * Copyright © 2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | */ | |
23 | ||
b2b89f55 RV |
24 | /** |
25 | * DOC: Panel Self Refresh (PSR/SRD) | |
26 | * | |
27 | * Since Haswell Display controller supports Panel Self-Refresh on display | |
28 | * panels which have a remote frame buffer (RFB) implemented according to PSR | |
29 | * spec in eDP1.3. PSR feature allows the display to go to lower standby states | |
30 | * when system is idle but display is on as it eliminates display refresh | |
31 | * request to DDR memory completely as long as the frame buffer for that | |
32 | * display is unchanged. | |
33 | * | |
34 | * Panel Self Refresh must be supported by both Hardware (source) and | |
35 | * Panel (sink). | |
36 | * | |
37 | * PSR saves power by caching the framebuffer in the panel RFB, which allows us | |
38 | * to power down the link and memory controller. For DSI panels the same idea | |
39 | * is called "manual mode". | |
40 | * | |
41 | * The implementation uses the hardware-based PSR support which automatically | |
42 | * enters/exits self-refresh mode. The hardware takes care of sending the | |
43 | * required DP aux message and could even retrain the link (that part isn't | |
44 | * enabled yet though). The hardware also keeps track of any frontbuffer | |
45 | * changes to know when to exit self-refresh mode again. Unfortunately that | |
46 | * part doesn't work too well, hence why the i915 PSR support uses the | |
47 | * software frontbuffer tracking to make sure it doesn't miss a screen | |
48 | * update. For this integration intel_psr_invalidate() and intel_psr_flush() | |
49 | * get called by the frontbuffer tracking code. Note that because of locking | |
50 | * issues the self-refresh re-enable code is done from a work queue, which | |
51 | * must be correctly synchronized/cancelled when shutting down the pipe. | |
52 | */ | |
53 | ||
0bc12bcb RV |
54 | #include <drm/drmP.h> |
55 | ||
56 | #include "intel_drv.h" | |
57 | #include "i915_drv.h" | |
58 | ||
/*
 * intel_psr_irq_control - choose which PSR interrupts are unmasked
 * @dev_priv: i915 device
 * @debug: additionally unmask the PSR pre-entry/post-exit debug interrupts
 *
 * PSR error interrupts are always unmasked; the entry/exit ones only when
 * @debug is set. The chosen debug state is published to dev_priv->psr.debug
 * with WRITE_ONCE before programming the interrupt mask register.
 */
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
	u32 debug_mask, mask;

	/* Transcoder EDP bits exist on all supported platforms. */
	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);

	/* Gen8+ also expose per-transcoder PSR bits for A/B/C. */
	if (INTEL_GEN(dev_priv) >= 8) {
		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
			EDP_PSR_ERROR(TRANSCODER_B) |
			EDP_PSR_ERROR(TRANSCODER_C);

		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
	}

	if (debug)
		mask |= debug_mask;

	WRITE_ONCE(dev_priv->psr.debug, debug);
	/* IMR takes the complement: a 0 bit leaves the interrupt unmasked. */
	I915_WRITE(EDP_PSR_IMR, ~mask);
}
86 | ||
/*
 * psr_event_print - decode a PSR_EVENT register value into debug log lines
 * @val: raw PSR_EVENT register contents
 * @psr2_enabled: whether PSR2 is the active mode; the PSR1/PSR2 "disabled"
 *	bits are only reported for the mode that was actually in use
 */
static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}
123 | ||
/*
 * intel_psr_irq_handler - service PSR interrupt bits from the IIR
 * @dev_priv: i915 device
 * @psr_iir: pending PSR interrupt bits
 *
 * Logs aux errors, and records entry-attempt/exit timestamps in
 * dev_priv->psr for debugfs-style reporting. On gen9+ the PSR_EVENT
 * register is read and decoded on every completed exit.
 */
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		/* FIXME: Exit PSR and link train manually when this happens. */
		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
				      transcoder_name(cpu_transcoder));

		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				/* Value written back — presumably write-1-to-clear;
				 * TODO(review): confirm against bspec. */
				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}
}
162 | ||
/*
 * Read the DPRX feature enumeration list over aux and report whether the
 * sink supports colorimetry data in the VSC SDP extension. Returns false
 * if the single-byte aux read fails.
 */
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
172 | ||
/*
 * Read the receiver ALPM capability byte over aux. Returns false if the
 * read fails or the sink does not advertise ALPM support.
 */
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}
182 | ||
/*
 * Read the sink's resynchronization latency (in frames) from DPCD.
 * Falls back to the worst-case value of 8 frames when the aux read fails.
 */
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}
194 | ||
/*
 * intel_psr_init_dpcd - probe the sink's PSR capabilities from DPCD
 * @intel_dp: Intel DP encoder whose aux channel is queried
 *
 * Reads the PSR capability block and fills in dev_priv->psr.sink_support,
 * sink_sync_latency, sink_psr2_support and colorimetry_support. Bails out
 * early if the panel advertises no PSR version or lacks the SET_POWER
 * capability PSR relies on.
 */
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	/* psr_dpcd[0] == 0 means no PSR support advertised at all. */
	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}
	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	/* PSR2 requires gen9+ hardware and PSR version 03h in the sink. */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that supports PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way panel is capable to do selective update
		 * without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
		}
	}
}
243 | ||
/*
 * intel_psr_setup_vsc - build and send the VSC SDP for the current PSR mode
 * @intel_dp: Intel DP encoder
 * @crtc_state: CRTC state passed through to the infoframe writer
 *
 * Fills in the SDP header bytes per the eDP spec tables referenced below
 * (PSR2/selective-update header for psr2_enabled, legacy PSR header
 * otherwise) and hands the packet to the port's write_infoframe hook.
 */
static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
275 | ||
/*
 * hsw_psr_setup_aux - pre-program the PSR hardware aux transaction (HSW/BDW)
 * @intel_dp: Intel DP encoder
 *
 * Loads a canned "native write DP_SET_POWER = D0" aux message into the
 * EDP_PSR_AUX_DATA registers and derives the EDP_PSR_AUX_CTL value from the
 * normal DDI aux-control bits, keeping only the fields valid for SRD_AUX_CTL.
 * The hardware replays this message on its own during PSR exit.
 */
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,	/* message length field: length - 1 */
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	/* Pack the message four bytes at a time into the AUX_DATA registers. */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
309 | ||
/*
 * intel_psr_enable_sink - enable PSR on the panel side via DPCD writes
 * @intel_dp: Intel DP encoder
 *
 * Configures DP_PSR_EN_CFG according to the selected mode (PSR2 + ALPM,
 * link-standby, CRC verification for PSR1 on gen8+) and finally forces the
 * sink into power state D0.
 */
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2;
	}

	if (dev_priv->psr.link_standby)
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	/* CRC verification is only requested for PSR1 on gen8+. */
	if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
		dpcd_val |= DP_PSR_CRC_VERIFICATION;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
332 | ||
/*
 * hsw_activate_psr1 - program and arm EDP_PSR_CTL for PSR1
 * @intel_dp: Intel DP encoder
 *
 * Builds the control value from VBT idle-frame and TP1/TP2/TP3 wakeup
 * timings, the sink's sync latency, and platform quirks, then writes it
 * with the enable bit set.
 */
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	/* Map the VBT TP1 wakeup time (us) onto the discrete hw settings. */
	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	/* Same mapping for the TP2/TP3 wakeup time. */
	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	/* TP3 is only usable when both source and sink support it. */
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	/* Preserve the hw-managed context-restore bits already in the reg. */
	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}
50db1390 | 389 | |
/*
 * hsw_activate_psr2 - program and arm EDP_PSR2_CTL for PSR2
 * @intel_dp: Intel DP encoder
 *
 * Like hsw_activate_psr1() but for the PSR2 control register: idle frames,
 * selective-update tracking, Y-coordinate support (gen10+/GLK), frames
 * before SU, and the TP2 wakeup time from VBT.
 */
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	/* NOTE(review): the ">= 0" arm is redundant if the VBT field is
	 * unsigned — confirm the field's type before simplifying. */
	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}
426 | ||
/*
 * intel_psr2_config_valid - check whether PSR2 can be used for this config
 * @intel_dp: Intel DP encoder
 * @crtc_state: CRTC state whose adjusted mode is validated
 *
 * Returns true when the sink advertises PSR2 support and the mode's
 * resolution fits within the per-platform PSR2 maximums.
 */
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}
461 | ||
/**
 * intel_psr_compute_config - decide whether PSR/PSR2 can be used
 * @intel_dp: Intel DP encoder
 * @crtc_state: CRTC state; has_psr/has_psr2 are set on success
 *
 * Checks every PSR precondition visible at modeset time (module parameter,
 * port, stereo 3D, interlace, sink setup time vs. vblank length) and, when
 * all pass, marks the state as PSR-capable.
 */
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	/* The sink's setup time must fit inside the vertical blank. */
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}
522 | ||
/*
 * intel_psr_activate - arm the source-side PSR hardware
 * @intel_dp: Intel DP encoder
 *
 * Must be called with psr.lock held and PSR currently inactive; dispatches
 * to the PSR1 or PSR2 activation path and marks psr.active.
 */
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Sanity-check that neither PSR mode is already armed in hardware. */
	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive.*/
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}
543 | ||
/*
 * intel_psr_enable_source - one-time source-side setup before PSR activation
 * @intel_dp: Intel DP encoder
 * @crtc_state: CRTC state providing the cpu transcoder
 *
 * Programs the canned aux transaction on HSW/BDW, the per-transcoder
 * chicken bits for PSR2, and the EDP_PSR_DEBUG mask bits that control
 * which hardware events force a PSR exit.
 */
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
	 * use hardcoded values PSR AUX transactions
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled) {
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		/* Gen9 (excluding GLK) needs extra chicken bits for PSR2. */
		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);

		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
		 * and HPD. also mask LPSP to avoid dependency on other
		 * drivers that might block runtime_pm besides
		 * preventing other hw tracking issues now we can rely
		 * on frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
	}
}
591 | ||
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and
 * enabled. Under psr.lock it configures the sink and source, records
 * @intel_dp as the owning encoder, and activates PSR. A no-op when the
 * state was not marked PSR-capable or PSR is already in use.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	/* PSR and DRRS are mutually exclusive per the check below. */
	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
632 | ||
/*
 * intel_psr_disable_source - disarm source-side PSR and wait for idle
 * @intel_dp: Intel DP encoder
 *
 * Clears the enable bit in the active mode's control register, then polls
 * the matching status register (up to 2 ms) until the state machine is
 * idle. If PSR was not active, only sanity-checks that the hardware
 * agrees.
 */
static void
intel_psr_disable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_enabled) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_enabled)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}
674 | ||
/*
 * intel_psr_disable_locked - disable PSR on source and sink
 * @intel_dp: Intel DP encoder
 *
 * Caller must hold psr.lock. No-op when PSR is not currently enabled.
 */
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	intel_psr_disable_source(intel_dp);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
}
693 | ||
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe. Takes psr.lock
 * for the actual teardown, then cancels any pending psr.work outside the
 * lock so the re-enable worker cannot run afterwards.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	intel_psr_disable_locked(intel_dp);
	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}
719 | ||
/*
 * intel_psr_wait_for_idle - wait for the PSR state machine to go idle
 * @new_crtc_state: CRTC state being committed
 *
 * Returns 0 immediately when the state has no PSR, otherwise the result of
 * polling the active mode's status register for the idle state.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg;
	u32 mask;

	if (!new_crtc_state->has_psr)
		return 0;

	/*
	 * The sole user right now is intel_pipe_update_start(),
	 * which won't race with psr_enable/disable, which is
	 * where psr2_enabled is written to. So, we don't need
	 * to acquire the psr.lock. More importantly, we want the
	 * latency inside intel_pipe_update_start() to be as low
	 * as possible, so no need to acquire psr.lock when it is
	 * not needed and will induce latencies in the atomic
	 * update path.
	 */
	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/*
	 * Max time for PSR to idle = Inverse of the refresh rate +
	 * 6 ms of exit training time + 1.5 ms of aux channel
	 * handshake. 50 msec is defensive enough to cover everything.
	 */
	return intel_wait_for_register(dev_priv, reg, mask,
				       EDP_PSR_STATUS_STATE_IDLE, 50);
}
756 | ||
757 | static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) | |
0bc12bcb | 758 | { |
daeb725e CW |
759 | struct intel_dp *intel_dp; |
760 | i915_reg_t reg; | |
761 | u32 mask; | |
762 | int err; | |
763 | ||
764 | intel_dp = dev_priv->psr.enabled; | |
765 | if (!intel_dp) | |
766 | return false; | |
0bc12bcb | 767 | |
ce3508fd DP |
768 | if (dev_priv->psr.psr2_enabled) { |
769 | reg = EDP_PSR2_STATUS; | |
770 | mask = EDP_PSR2_STATUS_STATE_MASK; | |
995d3047 | 771 | } else { |
ce3508fd DP |
772 | reg = EDP_PSR_STATUS; |
773 | mask = EDP_PSR_STATUS_STATE_MASK; | |
0bc12bcb | 774 | } |
daeb725e CW |
775 | |
776 | mutex_unlock(&dev_priv->psr.lock); | |
777 | ||
778 | err = intel_wait_for_register(dev_priv, reg, mask, 0, 50); | |
779 | if (err) | |
780 | DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); | |
781 | ||
782 | /* After the unlocked wait, verify that PSR is still wanted! */ | |
0bc12bcb | 783 | mutex_lock(&dev_priv->psr.lock); |
daeb725e CW |
784 | return err == 0 && dev_priv->psr.enabled; |
785 | } | |
0bc12bcb | 786 | |
/*
 * Deferred worker that tries to re-activate PSR once the hardware has
 * fully exited and no frontbuffer activity is pending. Scheduled from
 * intel_psr_flush().
 */
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.enabled);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
5748b6a1 | 819 | static void intel_psr_exit(struct drm_i915_private *dev_priv) |
0bc12bcb | 820 | { |
995d3047 | 821 | u32 val; |
0bc12bcb | 822 | |
995d3047 RV |
823 | if (!dev_priv->psr.active) |
824 | return; | |
825 | ||
ce3508fd DP |
826 | if (dev_priv->psr.psr2_enabled) { |
827 | val = I915_READ(EDP_PSR2_CTL); | |
828 | WARN_ON(!(val & EDP_PSR2_ENABLE)); | |
829 | I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); | |
995d3047 | 830 | } else { |
ce3508fd DP |
831 | val = I915_READ(EDP_PSR_CTL); |
832 | WARN_ON(!(val & EDP_PSR_ENABLE)); | |
833 | I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); | |
0bc12bcb | 834 | } |
995d3047 | 835 | dev_priv->psr.active = false; |
0bc12bcb RV |
836 | } |
837 | ||
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	/* Flips are handled by the hardware frontbuffer tracking */
	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only bits belonging to the PSR pipe are relevant */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	/* Flips are handled by the hardware frontbuffer tracking */
	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only bits belonging to the PSR pipe are relevant */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_enabled) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
			 * but it makes more sense write to the current active
			 * pipe.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	/* Try to re-activate PSR once everything is idle again */
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}
b2b89f55 RV |
941 | /** |
942 | * intel_psr_init - Init basic PSR work and mutex. | |
93de056b | 943 | * @dev_priv: i915 device private |
b2b89f55 RV |
944 | * |
945 | * This function is called only once at driver load to initialize basic | |
946 | * PSR stuff. | |
947 | */ | |
c39055b0 | 948 | void intel_psr_init(struct drm_i915_private *dev_priv) |
0bc12bcb | 949 | { |
0f328da6 RV |
950 | if (!HAS_PSR(dev_priv)) |
951 | return; | |
952 | ||
443a389f VS |
953 | dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? |
954 | HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; | |
955 | ||
c9ef291a DP |
956 | if (!dev_priv->psr.sink_support) |
957 | return; | |
958 | ||
2bdd045e DP |
959 | if (i915_modparams.enable_psr == -1) { |
960 | i915_modparams.enable_psr = dev_priv->vbt.psr.enable; | |
961 | ||
962 | /* Per platform default: all disabled. */ | |
4f044a88 | 963 | i915_modparams.enable_psr = 0; |
2bdd045e | 964 | } |
d94d6e87 | 965 | |
65f61b42 | 966 | /* Set link_standby x link_off defaults */ |
8652744b | 967 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
60e5ffe3 RV |
968 | /* HSW and BDW require workarounds that we don't implement. */ |
969 | dev_priv->psr.link_standby = false; | |
60e5ffe3 RV |
970 | else |
971 | /* For new platforms let's respect VBT back again */ | |
972 | dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; | |
973 | ||
5422b37c | 974 | INIT_WORK(&dev_priv->psr.work, intel_psr_work); |
0bc12bcb RV |
975 | mutex_init(&dev_priv->psr.lock); |
976 | } | |
/*
 * Handle a short HPD pulse from the sink: check the sink's PSR status
 * and error registers over DPCD and disable PSR if the sink reports an
 * internal error or any tracked error condition.
 */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	/* Error conditions we know how to react to (by disabling PSR) */
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	/* Ignore pulses from a DP that doesn't own the active PSR session */
	if (psr->enabled != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
	}

	/*
	 * NOTE(review): execution intentionally falls through here even
	 * after an internal-error disable so the error status register is
	 * still read and cleared; a second intel_psr_disable_locked() call
	 * below is a no-op since psr->enabled is already NULL.
	 */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors)
		intel_psr_disable_locked(intel_dp);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);

	/* TODO: handle PSR2 errors */
exit:
	mutex_unlock(&psr->lock);
}