/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
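/*
 * A rough sketch of how the software frontbuffer tracking drives PSR in this
 * file (illustrative only; the actual call sites live in the frontbuffer
 * tracking code):
 *
 *	frontbuffer rendering starts
 *	  -> intel_psr_invalidate(): records the dirty planes in
 *	     busy_frontbuffer_bits and forces a PSR exit via intel_psr_exit()
 *	frontbuffer flushed to memory
 *	  -> intel_psr_flush(): clears busy_frontbuffer_bits and schedules
 *	     dev_priv->psr.work, which re-activates PSR once the hardware
 *	     reports idle
 */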
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
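/*
 * The source transmits the PSR state to the sink in a VSC SDP. On HSW+ this
 * packet goes through the transcoder's Video Data Island Packet (DIP)
 * buffer; the helper below writes the packet payload one dword at a time and
 * zeroes the remainder of the buffer before re-enabling the DIP.
 */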
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP before programming the video DIP data buffer
	   registers. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(crtc->pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_support) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support &&
		    dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else if (dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		} else {
			psr_vsc.sdp_header.HB2 = 0x3;
			psr_vsc.sdp_header.HB3 = 0xc;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}
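/*
 * The aux_msg[] table below is the pre-formatted AUX transaction the PSR
 * hardware transmits on its own when exiting self-refresh: a native AUX
 * write of DP_SET_POWER_D0 to the sink's DP_SET_POWER DPCD register (600h),
 * packed into the PSR AUX data registers at setup time.
 */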
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->port;
	u32 aux_ctl;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);
	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
	if (dev_priv->psr.link_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);
	I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Let's do the transition from PSR_state 1 to PSR_state 2, that is
	 * PSR transition to active - static frame transmission. Then Hardware
	 * is responsible for the transition to PSR_state 3, that is PSR
	 * active - no Remote Frame Buffer (RFB) update.
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}
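/*
 * On HSW+ the EDP_PSR_CTL value assembled below packs together the idle
 * frame count (how many identical frames must pass before the hardware
 * enters self-refresh), the maximum sleep time, link standby mode and the
 * TP1/TP2/TP3 link training pattern wakeup times requested by VBT.
 */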
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val;

	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it
	 * doesn't mesh at all with our frontbuffer tracking. And the hw
	 * alone isn't good enough. */
	val |= EDP_PSR2_ENABLE |
	       EDP_SU_TRACK_ENABLE |
	       EDP_FRAMES_BEFORE_SU_ENTRY;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}
static void hsw_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* On HSW+, once PSR is enabled on the source it activates itself
	 * as soon as the configured idle_frame count is matched, so we
	 * just actually enable it here at activation time.
	 */

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_support)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);
}
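/*
 * Checks whether the source and the current mode can support PSR at all.
 * The verdict is cached in dev_priv->psr.source_ok, and every failed
 * condition is reported through a debug message, so enabling KMS debug
 * output should tell why PSR stayed off.
 */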
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *adjusted_mode =
		&intel_crtc->config->base.adjusted_mode;
	int psr_setup_time;

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
	if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return false;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return false;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return false;
	}

	/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
	if (dev_priv->psr.psr2_support &&
	    (intel_crtc->config->pipe_src_w > 3200 ||
	     intel_crtc->config->pipe_src_h > 2000)) {
		dev_priv->psr.psr2_support = false;
		return false;
	}

	/*
	 * FIXME: enable psr2 only for Y-coordinate psr2 panels.
	 * After GTC implementation, remove this restriction.
	 */
	if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
		DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_support)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}
static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 chicken;

	if (dev_priv->psr.psr2_support) {
		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
		if (dev_priv->psr.y_cord_support)
			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG_CTL,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides preventing
		 * other hw tracking issues, now that we can rely on
		 * frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG_CTL,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP);
	}
}
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv))
		return;

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	if (HAS_DDI(dev_priv)) {
		hsw_psr_setup_vsc(intel_dp, crtc_state);

		/* Enable PSR on the panel */
		hsw_psr_enable_sink(intel_dp);

		hsw_psr_enable_source(intel_dp, crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			intel_psr_activate(intel_dp);
	} else {
		vlv_psr_setup_vsc(intel_dp, crtc_state);

		/* Enable PSR on the panel */
		vlv_psr_enable_sink(intel_dp);

		vlv_psr_enable_source(intel_dp, crtc_state);
	}

	/*
	 * FIXME: Activation should happen immediately since this function
	 * is just called after the pipe is fully trained and enabled.
	 * However on every platform we face issues when the first activation
	 * follows a modeset so quickly.
	 * - On VLV/CHV we get a blank screen on first activation
	 * - On HSW/BDW we get a recoverable frozen screen until the next
	 *   exit-activate sequence.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
	}
}
static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);

		if (dev_priv->psr.psr2_support) {
			psr_status = EDP_PSR2_STATUS_CTL;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
		} else {
			psr_status = EDP_PSR_STATUS_CTL;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before the pipe is disabled.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}
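/*
 * Deferred re-activation: intel_psr_enable() and intel_psr_flush() schedule
 * this work instead of activating PSR synchronously, both because of the
 * locking constraints mentioned in the DOC comment above and because the
 * hardware needs to be fully idle after a PSR exit before self-refresh can
 * be re-entered.
 */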
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS_CTL,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS_CTL,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
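/*
 * Forces an immediate PSR exit on the source side; called with psr.lock held
 * from the invalidate and flush paths below.
 */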
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/* Here we do the transition directly from PSR_state 3
		 * (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit), since PSR_state 4 (active with
		 * single frame update) can be skipped. From PSR_state 5
		 * the Hardware is responsible for transitioning back to
		 * PSR_state 1 (inactive), the same state as after
		 * vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/* Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}
/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit is self-cleared when we get to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!HAS_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!HAS_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	/* Per platform default: all disabled. */
	if (i915.enable_psr == -1)
		i915.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby x link_off defaults */
	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
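	/*
	 * Hook up the per-platform PSR vfuncs: VLV/CHV use the software
	 * timer based per-pipe implementation, everything else uses the
	 * HSW+ DDI based one.
	 */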
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.activate = vlv_psr_activate;
	} else {
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.activate = hsw_psr_activate;
	}
}