/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
	/* CNL HW requires corresponding AUX IOs to be powered up for PSR.
	 * However, for non-A AUX ports the corresponding non-EDP transcoders
	 * would have already enabled power well 2 and DC_OFF. This means we can
	 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
	 * specific AUX_IO reference without powering up any extra wells.
	 * Note that PSR is enabled only on Port A even though this function
	 * returns the correct domain for other ports too.
	 */
	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
					      intel_dp->aux_power_domain;
}

static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

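/*
 * The helpers below probe optional sink capabilities in the DPCD. Each one
 * reads a single capability byte over AUX and treats a failed read as
 * "capability absent", so callers can use the returned bool directly.
 */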
static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
{
	uint8_t psr_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
		return false;
	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0]) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.psr2_support = intel_dp_get_y_coord_required(intel_dp);
		DRM_DEBUG_KMS("PSR2 %s on sink\n",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");

		if (dev_priv->psr.psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
		}
	}
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(crtc->pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}

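/*
 * In the VSC SDP headers built below, HB2 carries the SDP revision and HB3
 * the number of valid payload bytes (per the DP/eDP VSC SDP definitions):
 * revision 0x2 with length 0x8 for PSR1, 0x4/0xe for PSR2, and 0x5/0x13
 * when the sink also supports the extended colorimetry fields.
 */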
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_support) {
		/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

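/*
 * Pre-program the AUX message that the PSR hardware sends by itself to wake
 * the sink when exiting PSR: a native AUX write of DP_SET_POWER_D0 to DPCD
 * address DP_SET_POWER (600h). The message is packed into the
 * EDP_PSR_AUX_DATA registers, and the control bits are derived from the
 * regular AUX path, masked down to the fields that exist in SRD_AUX_CTL.
 */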
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Enable ALPM at sink for PSR2 */
	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
	if (dev_priv->psr.link_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

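/*
 * VLV/CHV PSR state machine, as referenced by the PSR_state comments in
 * this file: 0 = disabled, 1 = inactive, 2 = transition to active (static
 * frame transmission), 3 = active (no RFB update), 4 = active with single
 * frame update, 5 = exit.
 */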
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/*
	 * Let's do the transition from PSR_state 1 (inactive) to
	 * PSR_state 2 (transition to active - static frame transmission).
	 * Then the hardware is responsible for the transition to
	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

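/*
 * The VBT-provided TP1 and TP2/TP3 wakeup times below are mapped onto the
 * coarser register encodings: values above 5 select the 2500us encoding,
 * above 1 the 500us one, above 0 the 100us one, and 0 the minimum encoding.
 */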
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val;
	uint8_t sink_latency;

	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE |
	       EDP_SU_TRACK_ENABLE;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK,
			      &sink_latency) == 1) {
		sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	} else {
		sink_latency = 0;
	}
	val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}

static void hsw_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* On HSW+, after we enable PSR on the source it will activate as
	 * soon as the configured idle_frame count is matched, so we only
	 * actually enable it here at activation time.
	 */

	/* PSR1 and PSR2 are mutually exclusive. */
	if (dev_priv->psr.psr2_support)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);
}

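/*
 * Note that below psr_max_h/psr_max_v stay 0 on anything older than GEN9,
 * so the resolution check rejects PSR2 there by construction; GEN9 gets
 * 3640x2304 and GLK/GEN10+ get 4096x2304 as the maximum PSR2 screen size.
 */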
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}

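/*
 * Among the checks below, the PSR setup time one verifies that the sink can
 * finish its PSR entry sequence inside the vertical blank: the DPCD setup
 * time converted to scanlines must fit in vtotal - vdisplay - 1 lines.
 */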
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by display design transcoder EDP is tied to port A
	 * we can safely bail out based on port A.
	 */
	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_support)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}

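/*
 * For PSR2, the CHICKEN_TRANS bits programmed below tell the hardware to
 * use the VSC SDP header we programmed in hsw_psr_setup_vsc() and to add
 * the vertical line count (the Y coordinate required by the sink) to the
 * selective update packets.
 */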
static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	psr_aux_io_power_get(intel_dp);

	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ uses hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_support) {
		u32 chicken = PSR2_VSC_ENABLE_PROG_HEADER
			      | PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per spec: avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides preventing
		 * other HW tracking issues, now that we can rely on
		 * frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP);
	}
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_support = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
	dev_priv->psr.enable_sink(intel_dp);
	dev_priv->psr.enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after the pipe is fully trained
		 * and enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 * - On VLV/CHV we get a blank screen on first activation
		 * - On HSW/BDW we get a recoverable frozen screen until
		 *   the next exit-activate sequence.
		 */
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

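/*
 * Disabling on VLV/CHV first waits (with a 1 ms timeout) for any
 * in-progress PSR transition to finish, as reported by
 * VLV_EDP_PSR_IN_TRANS, and only then clears the enable/mode bits.
 */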
static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_support) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}

	psr_aux_io_power_put(intel_dp);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we do the transition directly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit).
		 * PSR_state 4 (active with single frame update) can be skipped.
		 * In PSR_state 5 (exit) the hardware is responsible for
		 * transitioning back to PSR_state 1 (inactive).
		 * We are then in the same state as after vlv_psr_enable_source().
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!CAN_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit self-clears when it gets to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_support ||
		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
			 * but it makes more sense to write to the currently
			 * active pipe.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	/* Per platform default: all disabled. */
	if (i915_modparams.enable_psr == -1)
		i915_modparams.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby vs. link_off defaults */
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

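	/*
	 * Hook up the platform-specific PSR vfuncs: VLV/CHV use their own
	 * single-pipe implementation, everything else uses the HSW+ DDI
	 * implementation with hardware-based frontbuffer tracking.
	 */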
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.enable_source = vlv_psr_enable_source;
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
		dev_priv->psr.activate = vlv_psr_activate;
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
	} else {
		dev_priv->psr.has_hw_tracking = true;
		dev_priv->psr.enable_source = hsw_psr_enable_source;
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
		dev_priv->psr.activate = hsw_psr_activate;
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
	}
}