/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR specification in eDP 1.3. PSR allows the display to go to lower standby
 * states when the system is idle but the display is on, as it eliminates
 * display refresh requests to DDR memory completely as long as the frame
 * buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
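/*
 * Illustrative sketch only (not part of this file): how the frontbuffer
 * tracking code described above is expected to drive PSR. The frontbuffer
 * bit mask and the ORIGIN_CPU origin below are just plausible example
 * arguments, not taken from the tracking code itself.
 *
 *	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);
 *
 *	// rendering to the frontbuffer starts: force a PSR exit
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits);
 *	// ... CPU/GPU writes land in the frontbuffer ...
 *	// rendering flushed out to memory: PSR may be scheduled for re-enable
 *	intel_psr_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 */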
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(crtc->pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_support) {
		/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support &&
		    dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else if (dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		} else {
			psr_vsc.sdp_header.HB2 = 0x3;
			psr_vsc.sdp_header.HB3 = 0xc;
		}
	} else {
		/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,	/* length of the AUX write below, minus one */
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->port;
	u32 aux_ctl;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);
	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
	if (dev_priv->psr.link_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);
	I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/*
	 * Let's do the transition from PSR_state 1 (inactive) to
	 * PSR_state 2 (transition to active - static frame transmission).
	 * Then Hardware is responsible for the transition to
	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including
	 * the off-by-one issue that HW has in some cases. Also there are
	 * cases where the sink should be able to train
	 * with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including
	 * the off-by-one issue that HW has in some cases. Also there are
	 * cases where the sink should be able to train
	 * with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val;

	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE |
		EDP_SU_TRACK_ENABLE |
		EDP_FRAMES_BEFORE_SU_ENTRY;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}
static void hsw_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* On HSW+, after we enable PSR on the source it will activate itself
	 * as soon as it matches the configured idle_frame count. So
	 * we just actually enable it here at activation time.
	 */

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_support)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *adjusted_mode =
		&intel_crtc->config->base.adjusted_mode;
	int psr_setup_time;

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
	if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return false;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return false;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return false;
	}

	/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
	if (dev_priv->psr.psr2_support &&
	    (intel_crtc->config->pipe_src_w > 3200 ||
	     intel_crtc->config->pipe_src_h > 2000)) {
		dev_priv->psr.psr2_support = false;
		return false;
	}

	/*
	 * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
	 * After GTC implementation, remove this restriction.
	 */
	if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
		DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_support)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}
static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 chicken;

	if (dev_priv->psr.psr2_support) {
		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
		if (dev_priv->psr.y_cord_support)
			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG_CTL,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides
		 * preventing other hw tracking issues now that we can rely
		 * on frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG_CTL,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP);
	}
}
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv))
		return;

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
	dev_priv->psr.enable_sink(intel_dp);
	dev_priv->psr.enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after the pipe is fully trained and
		 * enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 * - On VLV/CHV we get a blank screen on first activation
		 * - On HSW/BDW we get a recoverable frozen screen until
		 *   the next exit-activate sequence.
		 */
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
	}
}
static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_ctl;
		u32 psr_status_mask;

		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);

		if (dev_priv->psr.psr2_support) {
			/* Poll the PSR2 status register, not the control one. */
			psr_ctl = EDP_PSR2_STATUS_CTL;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
		} else {
			psr_ctl = EDP_PSR_STATUS_CTL;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_ctl, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before the pipe is disabled.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS_CTL,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS_CTL,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we do the transition directly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit).
		 * PSR State 4 (active with single frame update) can be skipped.
		 * On PSR_state 5 (exit) Hardware is responsible for transitioning
		 * back to PSR_state 1 (inactive).
		 * Now we are in the same state as after vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}
/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit will self-clear when it gets to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}
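/*
 * Illustrative sketch only (not a function in this file): the ordering a
 * caller in the flip path is expected to follow on VLV/CHV, assuming "bits"
 * are the frontbuffer bits touched by the flip and that ORIGIN_FLIP is the
 * appropriate flush origin for that caller.
 *
 *	intel_psr_single_frame_update(dev_priv, bits);
 *	// ... program and issue the page flip ...
 *	intel_psr_flush(dev_priv, bits, ORIGIN_FLIP);
 */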
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!HAS_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!HAS_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR functionality.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	/* Per platform default: all disabled. */
	if (i915.enable_psr == -1)
		i915.enable_psr = 0;

	/* Set link_standby / link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby / link_off defaults */
	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}
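	/*
	 * Illustrative note, inferred from the overrides above: on kernels
	 * where the i915.enable_psr module parameter keeps these meanings,
	 * booting with "i915.enable_psr=2" forces link standby and
	 * "i915.enable_psr=3" forces the main link off, regardless of the
	 * platform/VBT defaults chosen above.
	 */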
	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.enable_source = vlv_psr_enable_source;
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
		dev_priv->psr.activate = vlv_psr_activate;
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
	} else {
		dev_priv->psr.enable_source = hsw_psr_enable_source;
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
		dev_priv->psr.activate = hsw_psr_activate;
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
	}
}