/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_hdmi.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and the function above is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most of the modern systems will only
 * use page flips.
 */

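/*
 * Note on the helper below: the debugfs PSR mode in dev_priv->psr.debug
 * overrides the enable_psr module parameter; only I915_PSR_DEBUG_DEFAULT
 * defers to the parameter.
 */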
static bool psr_global_enabled(struct drm_i915_private *i915)
{
	switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
		    crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

static void psr_irq_control(struct drm_i915_private *dev_priv)
{
	enum transcoder trans_shift;
	u32 mask, val;
	i915_reg_t imr_reg;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = intel_de_read(dev_priv, imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	intel_de_write(dev_priv, imr_reg, val);
}

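/* Decode the PSR_EVENT register bits into human-readable debug messages. */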
static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		dev_priv->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		dev_priv->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = dev_priv->psr.psr2_enabled;

			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
			psr_event_print(dev_priv, val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		dev_priv->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or unsetting
		 * irq_aux_error.
		 */
		val = intel_de_read(dev_priv, imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		intel_de_write(dev_priv, imr_reg, val);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;

	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");

	return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u16 val;
	ssize_t r;

	/*
	 * Returning the default X granularity if granularity not required or
	 * if DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

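/*
 * Caches the sink's PSR DPCD capabilities and decides whether PSR1/PSR2 can
 * be supported on this panel; only one eDP panel with PSR is handled today.
 */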
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		drm_warn(&dev_priv->drm,
			 "More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h, and PSR version 03h panels
		 * without the Y-coordinate requirement, we would need to
		 * enable the sink's aux frame sync.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}

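/*
 * HSW/BDW only: pre-program the AUX message (a native write of DP_SET_POWER
 * to D0) that the PSR hardware sends on its own when exiting self-refresh.
 * SKL+ use hardcoded values for these transactions instead.
 */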
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
		       aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

check_tp3_sel:
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
	val |= intel_psr2_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. To set optimal power
		 * consumption, modes below 4k resolution need to decrease
		 * IO_BUFFER_WAKE and FAST_WAKE, and modes above 4k resolution
		 * need to increase IO_BUFFER_WAKE and FAST_WAKE.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

	if (dev_priv->psr.psr2_sel_fetch_enabled) {
		/* WA 1408330847 */
		if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
			       PSR2_MAN_TRK_CTL_ENABLE);
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
	 * recommending to keep this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
				     u32 idle_frames)
{
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

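/*
 * DC3CO expects PSR2 to exit deep sleep on every flip, so the idle frame
 * count is programmed to 0 while DC3CO is allowed and restored from
 * psr_compute_idle_frames() when it is disallowed again.
 */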
static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.dc3co_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&dev_priv->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&dev_priv->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(dev_priv);
}

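/*
 * Computes the EXITLINE scanline at which the transcoder starts the early
 * PSR2 exit needed to hide the ~200us DC3CO exit latency (Bspec 49196).
 */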
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	/* B.Specs:49196 DC3CO only works with pipe A and DDI A. */
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
	    dig_port->base.port != PORT_A)
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (!dev_priv->params.enable_psr2_sel_fetch) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 sel fetch not enabled, plane rotated\n");
			return false;
		}
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			    crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		return false;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;
}

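/*
 * Validates all the PSR enabling conditions against the new CRTC state while
 * computing the encoder config, and sets crtc_state->has_psr/has_psr2
 * accordingly.
 */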
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	if (!psr_global_enabled(dev_priv))
		return;
	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder but
	 * for now it only supports one instance of PSR, so let's keep it
	 * hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		drm_WARN_ON(&dev_priv->drm,
			    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = intel_de_read(dev_priv, reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		intel_de_write(dev_priv, reg, chicken);
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
		       mask);

	psr_irq_control(dev_priv);

	if (crtc_state->dc3co_exitline) {
		u32 val;

		/*
		 * TODO: if future platforms support DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
		val &= ~EXITLINE_MASK;
		val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
		val |= EXITLINE_ENABLE;
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
	}

	if (HAS_PSR_HW_TRACKING(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     dev_priv->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	dev_priv->psr.dc3co_exit_delay = val;
	dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling in this situation causes the screen to freeze the first
	 * time that the PSR HW tries to activate, so keep PSR disabled to
	 * avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = intel_de_read(dev_priv,
				    TRANS_PSR_IIR(dev_priv->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = intel_de_read(dev_priv, EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
	}
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return;
	}

	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
		    dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &dev_priv->psr.vsc);
	intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new CONNECTOR state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
		return;

	dev_priv->psr.force_mode_changed = false;

	if (!crtc_state->has_psr)
		return;

	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state, conn_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
			val = intel_de_read(dev_priv,
					    EDP_PSR2_CTL(dev_priv->psr.transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(dev_priv->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(dev_priv);
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(dev_priv->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR_CTL(dev_priv->psr.transcoder), val);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
		    dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");

	/* WA 1408330847 */
	if (dev_priv->psr.psr2_sel_fetch_enabled &&
	    (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
	     IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (dev_priv->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
	cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the current active
		 * pipe.
		 */
		intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct i915_psr *psr = &dev_priv->psr;

	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
	    !crtc_state->enable_psr2_sel_fetch)
		return;

	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
		       crtc_state->psr2_man_track_ctl);
}

void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE |
					 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new CONNECTOR state
 *
 * This function will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modeset, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	dev_priv->psr.force_mode_changed = false;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens.
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state, conn_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

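/*
 * Waits for the PSR status register to idle, dropping psr.lock across the
 * register wait; the return value folds in a re-check that PSR is still
 * enabled once the lock is re-acquired.
 */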
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		if (crtc_state->hw.active && crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->uapi.mode_changed = true;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

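/*
 * debugfs entry point: validates and applies a new PSR debug mask, and forces
 * a fastset so that a change of mode (PSR1 vs. PSR2 vs. disabled) takes
 * effect immediately.
 */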
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (dev_priv->psr.enabled)
		psr_irq_control(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush PSR for the ORIGIN_FLIP
 * event as well, so tgl_dc3co_flush() will need to be changed
 * accordingly then.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * Every frontbuffer flush from a flip event pushes back the delayed
	 * work; when the delayed work finally runs it means the display has
	 * been idle.
	 */
	if (!(frontbuffer_bits &
	    INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
			 dev_priv->psr.dc3co_exit_delay);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP) {
		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	if (!dev_priv->psr.sink_support)
		return;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from the register in transcoder space, results in the
		 * right offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (dev_priv->params.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (INTEL_GEN(dev_priv) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&dev_priv->psr.lock);
}

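/*
 * Reads both DP_PSR_STATUS and DP_PSR_ERROR_STATUS over AUX; returns 0 on
 * success with the sink state masked into *status.
 */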
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

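/*
 * Short HPD pulse handler for PSR: checks the sink's status and error
 * registers and disables PSR (marking the sink unreliable) on any
 * sink-reported error.
 */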
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);

	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}

void intel_psr_atomic_check(struct drm_connector *connector,
			    struct drm_connector_state *old_state,
			    struct drm_connector_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector;
	struct intel_digital_port *dig_port;
	struct drm_crtc_state *crtc_state;

	if (!CAN_PSR(dev_priv) || !new_state->crtc ||
	    !dev_priv->psr.force_mode_changed)
		return;

	intel_connector = to_intel_connector(connector);
	dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
	if (dev_priv->psr.dp != &dig_port->dp)
		return;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	crtc_state->mode_changed = true;
}

void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv;

	if (!intel_dp)
		return;

	dev_priv = dp_to_i915(intel_dp);
	if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
		return;

	dev_priv->psr.force_mode_changed = true;
}