/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "intel_drv.h"
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10
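/*
 * Illustrative arithmetic (not part of the original source): two FIFOs of
 * 640 x 6 bytes each hold 2 * 640 * 6 = 7680 bytes, i.e. 7680 * 8 = 61440
 * bits, which appears to be where the RAM buffer figure above comes from.
 */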
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976
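/*
 * Illustrative arithmetic (not part of the original source): FEC parity and
 * framing consume roughly 2.4% of the link, leaving (100 - 2.4)/100 = 0.976
 * of the raw bandwidth for payload.  Storing the factor as the integer 976
 * lets fixed-point code scale a bandwidth figure without floating point,
 * e.g. a hypothetical
 *
 *	payload = raw_bw * DP_DSC_FEC_OVERHEAD_FACTOR / 1000;
 *
 * turns a 2160000 kB/s HBR2 x4 link into roughly 2108160 kB/s of usable
 * payload.
 */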
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
/*
 * CHV supports eDP 1.4, which provides more link rates.
 * Below we only list the fixed rates; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
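/*
 * Worked example (illustrative, not from the original source): decoding the
 * 162000 entry above, m2 = 0x819999a, so m2_int = 0x819999a >> 22 = 32 and
 * m2_fraction = 0x819999a & ((1 << 22) - 1) = 0x19999a = 1677722, i.e.
 * m2 ~= 32 + 1677722 / 2^22 ~= 32.4.  The 270000 entry, 0x6c00000, decodes
 * to m2_int = 27 with a zero fraction.
 */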
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
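/*
 * Illustrative example (not from the original source): a sink reporting
 * DP_MAX_LINK_RATE = 0x14 maps to 540000 kHz via
 * drm_dp_bw_code_to_link_rate(), so the loop above fills sink_rates[] with
 * { 162000, 270000, 540000 } and sets num_sink_rates to 3; the 810000 (HBR3)
 * entry is skipped because it exceeds the reported maximum.
 */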
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					   int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
		     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
		    DP_LANE_ASSIGNMENT_SHIFT(tc_port);

	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

	return min3(source_max, sink_max, fia_max);
}
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
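/*
 * Worked example (illustrative, not from the original source): a
 * 1920x1080@60 mode with a 148500 kHz pixel clock at 24 bpp needs
 * intel_dp_link_required(148500, 24) = 445500 kB/s, while an HBR2
 * (540000 kHz) link with 4 lanes provides
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode easily fits
 * before any FEC/DSC considerations.
 */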
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	if (intel_port_is_combophy(dev_priv, port) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}
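/*
 * Illustrative example (not from the original source): with
 * rates[] = { 162000, 270000, 540000 }, intel_dp_rate_index(rates, 3, 270000)
 * returns 1, while asking for 810000 returns -1 since HBR3 is not in the
 * array.
 */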
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
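/*
 * Illustrative note (not from the original source): the DSC output bpp read
 * from the DPCD is a 6.4 fixed-point value, so a raw value of 0x145 encodes
 * 0x145 / 16 = 20.3125 bpp and the ">> 4" above keeps only the integer part,
 * 20 bpp.
 */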
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
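/*
 * Illustrative example (not from the original source): packing the bytes
 * { 0x10, 0x20, 0x30, 0x40 } with intel_dp_pack_aux() yields 0x10203040,
 * i.e. the first byte lands in the most significant position, and
 * intel_dp_unpack_aux(0x10203040, dst, 4) restores the original byte order.
 */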
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
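/*
 * Illustrative usage (not from the original source): the macro takes the PPS
 * mutex and a power reference for the body and releases both when the body
 * ends, because pps_unlock() returns 0 and terminates the for loop:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... code that touches the panel power sequencer ...
 *	}
 */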
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/*
	 * Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}
/*
 * Reboot notifier handler to shutdown panel power to guarantee T12 timing.
 * This function is only applicable when panel PM state is not to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_type == TC_PORT_TBT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	intel_wakeref_t wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, wakeref);

	return ret;
}
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
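/*
 * Illustrative example (not from the original source): a native AUX read
 * (request 0x9) of 16 bytes from DPCD address 0x00000 packs as
 * txbuf[] = { 0x90, 0x00, 0x00, 0x0f }: the request in the high nibble of
 * byte 0, the 20-bit address spread over the low nibble of byte 0 and
 * bytes 1-2, and "length - 1" in byte 3.
 */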
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
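/*
 * Illustrative example (not from the original source): for a sink without
 * rate-select, a 270000 kHz (HBR) port clock is reported as the classic
 * link-bw code 0x0a via drm_dp_link_rate_to_bw_code(), since the code is
 * simply the link rate divided by 27000; with rate-select the index into
 * sink_rates[] is used instead and the link-bw code is left at 0.
 */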
struct link_config_limits {
	int min_clock, max_clock;
	int min_lane_count, max_lane_count;
	int min_bpp, max_bpp;
};
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 11 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}
static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 10 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
		return false;

	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/*
		 * Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = 6 * 3;
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The eDP 1.3 and earlier panels
		 * are generally designed to support only a single clock and
		 * lane configuration, and typically these values correspond to
		 * the native resolution of the panel. With eDP 1.4 rate select
		 * and DSC, this is decreasingly the case, and we need to be
		 * able to select less than maximum link config.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we use the max clock and lane count for eDP 1.3 and
		 * earlier, and fast vs. wide is irrelevant.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
							&limits);
	else
		/* Optimize for slow and wide. */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							&limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}

	return 0;
}
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);

	pipe_config->has_drrs = false;
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
				  intel_dp_supports_fec(intel_dp, pipe_config);

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			pipe_config->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		pipe_config->limited_color_range =
			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
	}

	if (!pipe_config->dsc_params.compression_enable)
		intel_link_compute_m_n(pipe_config->pipe_bpp,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m_n,
				       constant_n);
	else
		intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m_n,
				       constant_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(pipe_config->pipe_bpp,
				       pipe_config->lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2,
				       constant_n);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}
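
/*
 * Illustrative sketch only, not part of the original driver: the
 * intel_link_compute_m_n() calls above encode the ratios the hardware
 * uses to pace pixel data onto the link. Conceptually the data M/N
 * pair is (pixel_clock * bpp) : (link_clock * lane_count * 8), and the
 * link M/N pair is pixel_clock : link_clock; the real helper also
 * reduces and scales the values to fit the M/N registers, which this
 * sketch deliberately leaves out. The helper name and the
 * simplification are assumptions for illustration.
 */
static inline void example_dp_data_m_n_ratio(int pixel_clock_khz, int bpp,
					     int link_clock_khz, int lane_count,
					     unsigned int *data_m,
					     unsigned int *data_n)
{
	/* bits of pixel data per unit time */
	*data_m = pixel_clock_khz * bpp;
	/* bits of link capacity per unit time */
	*data_n = link_clock_khz * lane_count * 8;
}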
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, u8 lane_count,
			      bool link_mst)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}
2237 static void intel_dp_prepare(struct intel_encoder
*encoder
,
2238 const struct intel_crtc_state
*pipe_config
)
2240 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
2241 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2242 enum port port
= encoder
->port
;
2243 struct intel_crtc
*crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
2244 const struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
2246 intel_dp_set_link_params(intel_dp
, pipe_config
->port_clock
,
2247 pipe_config
->lane_count
,
2248 intel_crtc_has_type(pipe_config
,
2249 INTEL_OUTPUT_DP_MST
));
2252 * There are four kinds of DP registers:
2259 * IBX PCH and CPU are the same for almost everything,
2260 * except that the CPU DP PLL is configured in this
2263 * CPT PCH is quite different, having many bits moved
2264 * to the TRANS_DP_CTL register instead. That
2265 * configuration happens (oddly) in ironlake_pch_enable
2268 /* Preserve the BIOS-computed detected bit. This is
2269 * supposed to be read-only.
2271 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
2273 /* Handle DP bits in common between all three register formats */
2274 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
2275 intel_dp
->DP
|= DP_PORT_WIDTH(pipe_config
->lane_count
);
2277 /* Split out the IBX/CPU vs CPT settings */
2279 if (IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
) {
2280 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
2281 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
2282 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
2283 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
2284 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
2286 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
2287 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
2289 intel_dp
->DP
|= DP_PIPE_SEL_IVB(crtc
->pipe
);
2290 } else if (HAS_PCH_CPT(dev_priv
) && port
!= PORT_A
) {
2293 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
2295 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2296 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
2297 trans_dp
|= TRANS_DP_ENH_FRAMING
;
2299 trans_dp
&= ~TRANS_DP_ENH_FRAMING
;
2300 I915_WRITE(TRANS_DP_CTL(crtc
->pipe
), trans_dp
);
2302 if (IS_G4X(dev_priv
) && pipe_config
->limited_color_range
)
2303 intel_dp
->DP
|= DP_COLOR_RANGE_16_235
;
2305 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
2306 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
2307 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
2308 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
2309 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
2311 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
2312 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
2314 if (IS_CHERRYVIEW(dev_priv
))
2315 intel_dp
->DP
|= DP_PIPE_SEL_CHV(crtc
->pipe
);
2317 intel_dp
->DP
|= DP_PIPE_SEL(crtc
->pipe
);
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
static void intel_pps_verify_state(struct intel_dp *intel_dp);

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask, u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
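
/*
 * Illustrative sketch only, not part of the original driver: the
 * power-cycle wait above only sleeps for whatever portion of the
 * T11/T12 panel power cycle delay has not already elapsed since the
 * panel was last powered off. With the off-duration and the required
 * delay both in milliseconds, the remaining wait is simply their
 * difference, clamped at zero. The helper name is an assumption used
 * purely for illustration.
 */
static inline s64 example_remaining_power_cycle_wait_ms(s64 off_duration_ms,
							s64 required_delay_ms)
{
	if (off_duration_ms >= required_delay_ms)
		return 0;

	return required_delay_ms - off_duration_ms;
}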
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}

	return control;
}
2427 * Must be paired with edp_panel_vdd_off().
2428 * Must hold pps_mutex around the whole on/off sequence.
2429 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2431 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
2433 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
2434 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2436 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
2437 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
2439 lockdep_assert_held(&dev_priv
->pps_mutex
);
2441 if (!intel_dp_is_edp(intel_dp
))
2444 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
2445 intel_dp
->want_panel_vdd
= true;
2447 if (edp_have_panel_vdd(intel_dp
))
2448 return need_to_disable
;
2450 intel_display_power_get(dev_priv
,
2451 intel_aux_power_domain(intel_dig_port
));
2453 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2454 port_name(intel_dig_port
->base
.port
));
2456 if (!edp_have_panel_power(intel_dp
))
2457 wait_panel_power_cycle(intel_dp
);
2459 pp
= ironlake_get_pp_control(intel_dp
);
2460 pp
|= EDP_FORCE_VDD
;
2462 pp_stat_reg
= _pp_stat_reg(intel_dp
);
2463 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2465 I915_WRITE(pp_ctrl_reg
, pp
);
2466 POSTING_READ(pp_ctrl_reg
);
2467 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2468 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
2470 * If the panel wasn't on, delay before accessing aux channel
2472 if (!edp_have_panel_power(intel_dp
)) {
2473 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2474 port_name(intel_dig_port
->base
.port
));
2475 msleep(intel_dp
->panel_power_up_delay
);
2478 return need_to_disable
;
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->base.port));
}
2503 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
2505 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
2506 struct intel_digital_port
*intel_dig_port
=
2507 dp_to_dig_port(intel_dp
);
2509 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
2511 lockdep_assert_held(&dev_priv
->pps_mutex
);
2513 WARN_ON(intel_dp
->want_panel_vdd
);
2515 if (!edp_have_panel_vdd(intel_dp
))
2518 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2519 port_name(intel_dig_port
->base
.port
));
2521 pp
= ironlake_get_pp_control(intel_dp
);
2522 pp
&= ~EDP_FORCE_VDD
;
2524 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2525 pp_stat_reg
= _pp_stat_reg(intel_dp
);
2527 I915_WRITE(pp_ctrl_reg
, pp
);
2528 POSTING_READ(pp_ctrl_reg
);
2530 /* Make sure sequencer is idle before allowing subsequent activity */
2531 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2532 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
2534 if ((pp
& PANEL_POWER_ON
) == 0)
2535 intel_dp
->panel_power_off_time
= ktime_get_boottime();
2537 intel_display_power_put_unchecked(dev_priv
,
2538 intel_aux_power_domain(intel_dig_port
));
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->base.port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
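
/*
 * Illustrative sketch only, not part of the original driver: the
 * comments on edp_panel_vdd_on()/edp_panel_vdd_off() require the pair
 * to run with pps_mutex held, which with_pps_lock() provides. A caller
 * that needs VDD forced on around AUX or panel power register access
 * would follow roughly this shape; the function name is an assumption,
 * and the real callers in this file (e.g. intel_edp_panel_vdd_on() and
 * intel_enable_dp()) remain the authoritative examples.
 */
static inline void example_with_forced_vdd(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		edp_panel_vdd_on(intel_dp);
		/* ... AUX transfers / panel power register access here ... */
		edp_panel_vdd_off(intel_dp, false);
	}
}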
2592 static void edp_panel_on(struct intel_dp
*intel_dp
)
2594 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
2596 i915_reg_t pp_ctrl_reg
;
2598 lockdep_assert_held(&dev_priv
->pps_mutex
);
2600 if (!intel_dp_is_edp(intel_dp
))
2603 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2604 port_name(dp_to_dig_port(intel_dp
)->base
.port
));
2606 if (WARN(edp_have_panel_power(intel_dp
),
2607 "eDP port %c panel power already on\n",
2608 port_name(dp_to_dig_port(intel_dp
)->base
.port
)))
2611 wait_panel_power_cycle(intel_dp
);
2613 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2614 pp
= ironlake_get_pp_control(intel_dp
);
2615 if (IS_GEN(dev_priv
, 5)) {
2616 /* ILK workaround: disable reset around power sequence */
2617 pp
&= ~PANEL_POWER_RESET
;
2618 I915_WRITE(pp_ctrl_reg
, pp
);
2619 POSTING_READ(pp_ctrl_reg
);
2622 pp
|= PANEL_POWER_ON
;
2623 if (!IS_GEN(dev_priv
, 5))
2624 pp
|= PANEL_POWER_RESET
;
2626 I915_WRITE(pp_ctrl_reg
, pp
);
2627 POSTING_READ(pp_ctrl_reg
);
2629 wait_panel_on(intel_dp
);
2630 intel_dp
->last_power_on
= jiffies
;
2632 if (IS_GEN(dev_priv
, 5)) {
2633 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
2634 I915_WRITE(pp_ctrl_reg
, pp
);
2635 POSTING_READ(pp_ctrl_reg
);
2639 void intel_edp_panel_on(struct intel_dp
*intel_dp
)
2641 intel_wakeref_t wakeref
;
2643 if (!intel_dp_is_edp(intel_dp
))
2646 with_pps_lock(intel_dp
, wakeref
)
2647 edp_panel_on(intel_dp
);
2651 static void edp_panel_off(struct intel_dp
*intel_dp
)
2653 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
2654 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
2656 i915_reg_t pp_ctrl_reg
;
2658 lockdep_assert_held(&dev_priv
->pps_mutex
);
2660 if (!intel_dp_is_edp(intel_dp
))
2663 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2664 port_name(dig_port
->base
.port
));
2666 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
2667 port_name(dig_port
->base
.port
));
2669 pp
= ironlake_get_pp_control(intel_dp
);
2670 /* We need to switch off panel power _and_ force vdd, for otherwise some
2671 * panels get very unhappy and cease to work. */
2672 pp
&= ~(PANEL_POWER_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
2675 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2677 intel_dp
->want_panel_vdd
= false;
2679 I915_WRITE(pp_ctrl_reg
, pp
);
2680 POSTING_READ(pp_ctrl_reg
);
2682 wait_panel_off(intel_dp
);
2683 intel_dp
->panel_power_off_time
= ktime_get_boottime();
2685 /* We got a reference when we enabled the VDD. */
2686 intel_display_power_put_unchecked(dev_priv
, intel_aux_power_domain(dig_port
));
2689 void intel_edp_panel_off(struct intel_dp
*intel_dp
)
2691 intel_wakeref_t wakeref
;
2693 if (!intel_dp_is_edp(intel_dp
))
2696 with_pps_lock(intel_dp
, wakeref
)
2697 edp_panel_off(intel_dp
);
2700 /* Enable backlight in the panel power control. */
2701 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2703 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
2704 intel_wakeref_t wakeref
;
2707 * If we enable the backlight right away following a panel power
2708 * on, we may see slight flicker as the panel syncs with the eDP
2709 * link. So delay a bit to make sure the image is solid before
2710 * allowing it to appear.
2712 wait_backlight_on(intel_dp
);
2714 with_pps_lock(intel_dp
, wakeref
) {
2715 i915_reg_t pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2718 pp
= ironlake_get_pp_control(intel_dp
);
2719 pp
|= EDP_BLC_ENABLE
;
2721 I915_WRITE(pp_ctrl_reg
, pp
);
2722 POSTING_READ(pp_ctrl_reg
);
2726 /* Enable backlight PWM and backlight PP control. */
2727 void intel_edp_backlight_on(const struct intel_crtc_state
*crtc_state
,
2728 const struct drm_connector_state
*conn_state
)
2730 struct intel_dp
*intel_dp
= enc_to_intel_dp(conn_state
->best_encoder
);
2732 if (!intel_dp_is_edp(intel_dp
))
2735 DRM_DEBUG_KMS("\n");
2737 intel_panel_enable_backlight(crtc_state
, conn_state
);
2738 _intel_edp_backlight_on(intel_dp
);
2741 /* Disable backlight in the panel power control. */
2742 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2744 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
2745 intel_wakeref_t wakeref
;
2747 if (!intel_dp_is_edp(intel_dp
))
2750 with_pps_lock(intel_dp
, wakeref
) {
2751 i915_reg_t pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2754 pp
= ironlake_get_pp_control(intel_dp
);
2755 pp
&= ~EDP_BLC_ENABLE
;
2757 I915_WRITE(pp_ctrl_reg
, pp
);
2758 POSTING_READ(pp_ctrl_reg
);
2761 intel_dp
->last_backlight_off
= jiffies
;
2762 edp_wait_backlight_off(intel_dp
);
2765 /* Disable backlight PP control and backlight PWM. */
2766 void intel_edp_backlight_off(const struct drm_connector_state
*old_conn_state
)
2768 struct intel_dp
*intel_dp
= enc_to_intel_dp(old_conn_state
->best_encoder
);
2770 if (!intel_dp_is_edp(intel_dp
))
2773 DRM_DEBUG_KMS("\n");
2775 _intel_edp_backlight_off(intel_dp
);
2776 intel_panel_disable_backlight(old_conn_state
);
2780 * Hook for controlling the panel power control backlight through the bl_power
2781 * sysfs attribute. Take care to handle multiple calls.
2783 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2786 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
2787 intel_wakeref_t wakeref
;
2791 with_pps_lock(intel_dp
, wakeref
)
2792 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2793 if (is_enabled
== enable
)
2796 DRM_DEBUG_KMS("panel power control backlight %s\n",
2797 enable
? "enable" : "disable");
2800 _intel_edp_backlight_on(intel_dp
);
2802 _intel_edp_backlight_off(intel_dp
);
2805 static void assert_dp_port(struct intel_dp
*intel_dp
, bool state
)
2807 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
2808 struct drm_i915_private
*dev_priv
= to_i915(dig_port
->base
.base
.dev
);
2809 bool cur_state
= I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
;
2811 I915_STATE_WARN(cur_state
!= state
,
2812 "DP port %c state assertion failure (expected %s, current %s)\n",
2813 port_name(dig_port
->base
.port
),
2814 onoff(state
), onoff(cur_state
));
2816 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2818 static void assert_edp_pll(struct drm_i915_private
*dev_priv
, bool state
)
2820 bool cur_state
= I915_READ(DP_A
) & DP_PLL_ENABLE
;
2822 I915_STATE_WARN(cur_state
!= state
,
2823 "eDP PLL state assertion failure (expected %s, current %s)\n",
2824 onoff(state
), onoff(cur_state
));
2826 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2827 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2829 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
,
2830 const struct intel_crtc_state
*pipe_config
)
2832 struct intel_crtc
*crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
2833 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2835 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2836 assert_dp_port_disabled(intel_dp
);
2837 assert_edp_pll_disabled(dev_priv
);
2839 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2840 pipe_config
->port_clock
);
2842 intel_dp
->DP
&= ~DP_PLL_FREQ_MASK
;
2844 if (pipe_config
->port_clock
== 162000)
2845 intel_dp
->DP
|= DP_PLL_FREQ_162MHZ
;
2847 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
2849 I915_WRITE(DP_A
, intel_dp
->DP
);
2854 * [DevILK] Work around required when enabling DP PLL
2855 * while a pipe is enabled going to FDI:
2856 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2857 * 2. Program DP PLL enable
2859 if (IS_GEN(dev_priv
, 5))
2860 intel_wait_for_vblank_if_active(dev_priv
, !crtc
->pipe
);
2862 intel_dp
->DP
|= DP_PLL_ENABLE
;
2864 I915_WRITE(DP_A
, intel_dp
->DP
);
2869 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
,
2870 const struct intel_crtc_state
*old_crtc_state
)
2872 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
2873 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2875 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2876 assert_dp_port_disabled(intel_dp
);
2877 assert_edp_pll_enabled(dev_priv
);
2879 DRM_DEBUG_KMS("disabling eDP PLL\n");
2881 intel_dp
->DP
&= ~DP_PLL_ENABLE
;
2883 I915_WRITE(DP_A
, intel_dp
->DP
);
2888 static bool downstream_hpd_needs_d0(struct intel_dp
*intel_dp
)
2891 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2892 * be capable of signalling downstream hpd with a long pulse.
2893 * Whether or not that means D3 is safe to use is not clear,
2894 * but let's assume so until proven otherwise.
2896 * FIXME should really check all downstream ports...
2898 return intel_dp
->dpcd
[DP_DPCD_REV
] == 0x11 &&
2899 intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
&&
2900 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
;
2903 void intel_dp_sink_set_decompression_state(struct intel_dp
*intel_dp
,
2904 const struct intel_crtc_state
*crtc_state
,
2909 if (!crtc_state
->dsc_params
.compression_enable
)
2912 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_DSC_ENABLE
,
2913 enable
? DP_DECOMPRESSION_EN
: 0);
2915 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2916 enable
? "enable" : "disable");
2919 /* If the sink supports it, try to set the power state appropriately */
2920 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2924 /* Should have a valid DPCD by this point */
2925 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2928 if (mode
!= DRM_MODE_DPMS_ON
) {
2929 if (downstream_hpd_needs_d0(intel_dp
))
2932 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2935 struct intel_lspcon
*lspcon
= dp_to_lspcon(intel_dp
);
2938 * When turning on, we need to retry for 1ms to give the sink
2941 for (i
= 0; i
< 3; i
++) {
2942 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2949 if (ret
== 1 && lspcon
->active
)
2950 lspcon_wait_pcon_mode(lspcon
);
2954 DRM_DEBUG_KMS("failed to %s sink power state\n",
2955 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
2958 static bool cpt_dp_port_selected(struct drm_i915_private
*dev_priv
,
2959 enum port port
, enum pipe
*pipe
)
2963 for_each_pipe(dev_priv
, p
) {
2964 u32 val
= I915_READ(TRANS_DP_CTL(p
));
2966 if ((val
& TRANS_DP_PORT_SEL_MASK
) == TRANS_DP_PORT_SEL(port
)) {
2972 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port
));
2974 /* must initialize pipe to something for the asserts */
2980 bool intel_dp_port_enabled(struct drm_i915_private
*dev_priv
,
2981 i915_reg_t dp_reg
, enum port port
,
2987 val
= I915_READ(dp_reg
);
2989 ret
= val
& DP_PORT_EN
;
2991 /* asserts want to know the pipe even if the port is disabled */
2992 if (IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
)
2993 *pipe
= (val
& DP_PIPE_SEL_MASK_IVB
) >> DP_PIPE_SEL_SHIFT_IVB
;
2994 else if (HAS_PCH_CPT(dev_priv
) && port
!= PORT_A
)
2995 ret
&= cpt_dp_port_selected(dev_priv
, port
, pipe
);
2996 else if (IS_CHERRYVIEW(dev_priv
))
2997 *pipe
= (val
& DP_PIPE_SEL_MASK_CHV
) >> DP_PIPE_SEL_SHIFT_CHV
;
2999 *pipe
= (val
& DP_PIPE_SEL_MASK
) >> DP_PIPE_SEL_SHIFT
;
3004 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
3007 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3008 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3009 intel_wakeref_t wakeref
;
3012 wakeref
= intel_display_power_get_if_enabled(dev_priv
,
3013 encoder
->power_domain
);
3017 ret
= intel_dp_port_enabled(dev_priv
, intel_dp
->output_reg
,
3018 encoder
->port
, pipe
);
3020 intel_display_power_put(dev_priv
, encoder
->power_domain
, wakeref
);
3025 static void intel_dp_get_config(struct intel_encoder
*encoder
,
3026 struct intel_crtc_state
*pipe_config
)
3028 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3029 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3031 enum port port
= encoder
->port
;
3032 struct intel_crtc
*crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
3034 if (encoder
->type
== INTEL_OUTPUT_EDP
)
3035 pipe_config
->output_types
|= BIT(INTEL_OUTPUT_EDP
);
3037 pipe_config
->output_types
|= BIT(INTEL_OUTPUT_DP
);
3039 tmp
= I915_READ(intel_dp
->output_reg
);
3041 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
3043 if (HAS_PCH_CPT(dev_priv
) && port
!= PORT_A
) {
3044 u32 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
3046 if (trans_dp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
3047 flags
|= DRM_MODE_FLAG_PHSYNC
;
3049 flags
|= DRM_MODE_FLAG_NHSYNC
;
3051 if (trans_dp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
3052 flags
|= DRM_MODE_FLAG_PVSYNC
;
3054 flags
|= DRM_MODE_FLAG_NVSYNC
;
3056 if (tmp
& DP_SYNC_HS_HIGH
)
3057 flags
|= DRM_MODE_FLAG_PHSYNC
;
3059 flags
|= DRM_MODE_FLAG_NHSYNC
;
3061 if (tmp
& DP_SYNC_VS_HIGH
)
3062 flags
|= DRM_MODE_FLAG_PVSYNC
;
3064 flags
|= DRM_MODE_FLAG_NVSYNC
;
3067 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
3069 if (IS_G4X(dev_priv
) && tmp
& DP_COLOR_RANGE_16_235
)
3070 pipe_config
->limited_color_range
= true;
3072 pipe_config
->lane_count
=
3073 ((tmp
& DP_PORT_WIDTH_MASK
) >> DP_PORT_WIDTH_SHIFT
) + 1;
3075 intel_dp_get_m_n(crtc
, pipe_config
);
3077 if (port
== PORT_A
) {
3078 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_162MHZ
)
3079 pipe_config
->port_clock
= 162000;
3081 pipe_config
->port_clock
= 270000;
3084 pipe_config
->base
.adjusted_mode
.crtc_clock
=
3085 intel_dotclock_calculate(pipe_config
->port_clock
,
3086 &pipe_config
->dp_m_n
);
3088 if (intel_dp_is_edp(intel_dp
) && dev_priv
->vbt
.edp
.bpp
&&
3089 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp
.bpp
) {
3091 * This is a big fat ugly hack.
3093 * Some machines in UEFI boot mode provide us a VBT that has 18
3094 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3095 * unknown we fail to light up. Yet the same BIOS boots up with
3096 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3097 * max, not what it tells us to use.
3099 * Note: This will still be broken if the eDP panel is not lit
3100 * up by the BIOS, and thus we can't get the mode at module
3103 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3104 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp
.bpp
);
3105 dev_priv
->vbt
.edp
.bpp
= pipe_config
->pipe_bpp
;
3109 static void intel_disable_dp(struct intel_encoder
*encoder
,
3110 const struct intel_crtc_state
*old_crtc_state
,
3111 const struct drm_connector_state
*old_conn_state
)
3113 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3115 intel_dp
->link_trained
= false;
3117 if (old_crtc_state
->has_audio
)
3118 intel_audio_codec_disable(encoder
,
3119 old_crtc_state
, old_conn_state
);
3121 /* Make sure the panel is off before trying to change the mode. But also
3122 * ensure that we have vdd while we switch off the panel. */
3123 intel_edp_panel_vdd_on(intel_dp
);
3124 intel_edp_backlight_off(old_conn_state
);
3125 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_OFF
);
3126 intel_edp_panel_off(intel_dp
);
3129 static void g4x_disable_dp(struct intel_encoder
*encoder
,
3130 const struct intel_crtc_state
*old_crtc_state
,
3131 const struct drm_connector_state
*old_conn_state
)
3133 intel_disable_dp(encoder
, old_crtc_state
, old_conn_state
);
3136 static void vlv_disable_dp(struct intel_encoder
*encoder
,
3137 const struct intel_crtc_state
*old_crtc_state
,
3138 const struct drm_connector_state
*old_conn_state
)
3140 intel_disable_dp(encoder
, old_crtc_state
, old_conn_state
);
3143 static void g4x_post_disable_dp(struct intel_encoder
*encoder
,
3144 const struct intel_crtc_state
*old_crtc_state
,
3145 const struct drm_connector_state
*old_conn_state
)
3147 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3148 enum port port
= encoder
->port
;
3151 * Bspec does not list a specific disable sequence for g4x DP.
3152 * Follow the ilk+ sequence (disable pipe before the port) for
3153 * g4x DP as it does not suffer from underruns like the normal
3154 * g4x modeset sequence (disable pipe after the port).
3156 intel_dp_link_down(encoder
, old_crtc_state
);
3158 /* Only ilk+ has port A */
3160 ironlake_edp_pll_off(intel_dp
, old_crtc_state
);
3163 static void vlv_post_disable_dp(struct intel_encoder
*encoder
,
3164 const struct intel_crtc_state
*old_crtc_state
,
3165 const struct drm_connector_state
*old_conn_state
)
3167 intel_dp_link_down(encoder
, old_crtc_state
);
3170 static void chv_post_disable_dp(struct intel_encoder
*encoder
,
3171 const struct intel_crtc_state
*old_crtc_state
,
3172 const struct drm_connector_state
*old_conn_state
)
3174 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3176 intel_dp_link_down(encoder
, old_crtc_state
);
3178 mutex_lock(&dev_priv
->sb_lock
);
3180 /* Assert data lane reset */
3181 chv_data_lane_soft_reset(encoder
, old_crtc_state
, true);
3183 mutex_unlock(&dev_priv
->sb_lock
);
3187 _intel_dp_set_link_train(struct intel_dp
*intel_dp
,
3191 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
3192 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3193 enum port port
= intel_dig_port
->base
.port
;
3194 u8 train_pat_mask
= drm_dp_training_pattern_mask(intel_dp
->dpcd
);
3196 if (dp_train_pat
& train_pat_mask
)
3197 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3198 dp_train_pat
& train_pat_mask
);
3200 if (HAS_DDI(dev_priv
)) {
3201 u32 temp
= I915_READ(DP_TP_CTL(port
));
3203 if (dp_train_pat
& DP_LINK_SCRAMBLING_DISABLE
)
3204 temp
|= DP_TP_CTL_SCRAMBLE_DISABLE
;
3206 temp
&= ~DP_TP_CTL_SCRAMBLE_DISABLE
;
3208 temp
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3209 switch (dp_train_pat
& train_pat_mask
) {
3210 case DP_TRAINING_PATTERN_DISABLE
:
3211 temp
|= DP_TP_CTL_LINK_TRAIN_NORMAL
;
3214 case DP_TRAINING_PATTERN_1
:
3215 temp
|= DP_TP_CTL_LINK_TRAIN_PAT1
;
3217 case DP_TRAINING_PATTERN_2
:
3218 temp
|= DP_TP_CTL_LINK_TRAIN_PAT2
;
3220 case DP_TRAINING_PATTERN_3
:
3221 temp
|= DP_TP_CTL_LINK_TRAIN_PAT3
;
3223 case DP_TRAINING_PATTERN_4
:
3224 temp
|= DP_TP_CTL_LINK_TRAIN_PAT4
;
3227 I915_WRITE(DP_TP_CTL(port
), temp
);
3229 } else if ((IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
) ||
3230 (HAS_PCH_CPT(dev_priv
) && port
!= PORT_A
)) {
3231 *DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3233 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
3234 case DP_TRAINING_PATTERN_DISABLE
:
3235 *DP
|= DP_LINK_TRAIN_OFF_CPT
;
3237 case DP_TRAINING_PATTERN_1
:
3238 *DP
|= DP_LINK_TRAIN_PAT_1_CPT
;
3240 case DP_TRAINING_PATTERN_2
:
3241 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
3243 case DP_TRAINING_PATTERN_3
:
3244 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3245 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
3250 *DP
&= ~DP_LINK_TRAIN_MASK
;
3252 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
3253 case DP_TRAINING_PATTERN_DISABLE
:
3254 *DP
|= DP_LINK_TRAIN_OFF
;
3256 case DP_TRAINING_PATTERN_1
:
3257 *DP
|= DP_LINK_TRAIN_PAT_1
;
3259 case DP_TRAINING_PATTERN_2
:
3260 *DP
|= DP_LINK_TRAIN_PAT_2
;
3262 case DP_TRAINING_PATTERN_3
:
3263 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3264 *DP
|= DP_LINK_TRAIN_PAT_2
;
3270 static void intel_dp_enable_port(struct intel_dp
*intel_dp
,
3271 const struct intel_crtc_state
*old_crtc_state
)
3273 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
3275 /* enable with pattern 1 (as per spec) */
3277 intel_dp_program_link_training_pattern(intel_dp
, DP_TRAINING_PATTERN_1
);
3280 * Magic for VLV/CHV. We _must_ first set up the register
3281 * without actually enabling the port, and then do another
3282 * write to enable the port. Otherwise link training will
3283 * fail when the power sequencer is freshly used for this port.
3285 intel_dp
->DP
|= DP_PORT_EN
;
3286 if (old_crtc_state
->has_audio
)
3287 intel_dp
->DP
|= DP_AUDIO_OUTPUT_ENABLE
;
3289 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3290 POSTING_READ(intel_dp
->output_reg
);
3293 static void intel_enable_dp(struct intel_encoder
*encoder
,
3294 const struct intel_crtc_state
*pipe_config
,
3295 const struct drm_connector_state
*conn_state
)
3297 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3298 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3299 struct intel_crtc
*crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
3300 u32 dp_reg
= I915_READ(intel_dp
->output_reg
);
3301 enum pipe pipe
= crtc
->pipe
;
3302 intel_wakeref_t wakeref
;
3304 if (WARN_ON(dp_reg
& DP_PORT_EN
))
3307 with_pps_lock(intel_dp
, wakeref
) {
3308 if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
3309 vlv_init_panel_power_sequencer(encoder
, pipe_config
);
3311 intel_dp_enable_port(intel_dp
, pipe_config
);
3313 edp_panel_vdd_on(intel_dp
);
3314 edp_panel_on(intel_dp
);
3315 edp_panel_vdd_off(intel_dp
, true);
3318 if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
3319 unsigned int lane_mask
= 0x0;
3321 if (IS_CHERRYVIEW(dev_priv
))
3322 lane_mask
= intel_dp_unused_lane_mask(pipe_config
->lane_count
);
3324 vlv_wait_port_ready(dev_priv
, dp_to_dig_port(intel_dp
),
3328 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_ON
);
3329 intel_dp_start_link_train(intel_dp
);
3330 intel_dp_stop_link_train(intel_dp
);
3332 if (pipe_config
->has_audio
) {
3333 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3335 intel_audio_codec_enable(encoder
, pipe_config
, conn_state
);
3339 static void g4x_enable_dp(struct intel_encoder
*encoder
,
3340 const struct intel_crtc_state
*pipe_config
,
3341 const struct drm_connector_state
*conn_state
)
3343 intel_enable_dp(encoder
, pipe_config
, conn_state
);
3344 intel_edp_backlight_on(pipe_config
, conn_state
);
3347 static void vlv_enable_dp(struct intel_encoder
*encoder
,
3348 const struct intel_crtc_state
*pipe_config
,
3349 const struct drm_connector_state
*conn_state
)
3351 intel_edp_backlight_on(pipe_config
, conn_state
);
3354 static void g4x_pre_enable_dp(struct intel_encoder
*encoder
,
3355 const struct intel_crtc_state
*pipe_config
,
3356 const struct drm_connector_state
*conn_state
)
3358 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3359 enum port port
= encoder
->port
;
3361 intel_dp_prepare(encoder
, pipe_config
);
3363 /* Only ilk+ has port A */
3365 ironlake_edp_pll_on(intel_dp
, pipe_config
);
3368 static void vlv_detach_power_sequencer(struct intel_dp
*intel_dp
)
3370 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3371 struct drm_i915_private
*dev_priv
= to_i915(intel_dig_port
->base
.base
.dev
);
3372 enum pipe pipe
= intel_dp
->pps_pipe
;
3373 i915_reg_t pp_on_reg
= PP_ON_DELAYS(pipe
);
3375 WARN_ON(intel_dp
->active_pipe
!= INVALID_PIPE
);
3377 if (WARN_ON(pipe
!= PIPE_A
&& pipe
!= PIPE_B
))
3380 edp_panel_vdd_off_sync(intel_dp
);
3383 * VLV seems to get confused when multiple power sequencers
3384 * have the same port selected (even if only one has power/vdd
3385 * enabled). The failure manifests as vlv_wait_port_ready() failing
3386 * CHV on the other hand doesn't seem to mind having the same port
3387 * selected in multiple power sequencers, but let's clear the
3388 * port select always when logically disconnecting a power sequencer
3391 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3392 pipe_name(pipe
), port_name(intel_dig_port
->base
.port
));
3393 I915_WRITE(pp_on_reg
, 0);
3394 POSTING_READ(pp_on_reg
);
3396 intel_dp
->pps_pipe
= INVALID_PIPE
;
3399 static void vlv_steal_power_sequencer(struct drm_i915_private
*dev_priv
,
3402 struct intel_encoder
*encoder
;
3404 lockdep_assert_held(&dev_priv
->pps_mutex
);
3406 for_each_intel_dp(&dev_priv
->drm
, encoder
) {
3407 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3408 enum port port
= encoder
->port
;
3410 WARN(intel_dp
->active_pipe
== pipe
,
3411 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3412 pipe_name(pipe
), port_name(port
));
3414 if (intel_dp
->pps_pipe
!= pipe
)
3417 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3418 pipe_name(pipe
), port_name(port
));
3420 /* make sure vdd is off before we steal it */
3421 vlv_detach_power_sequencer(intel_dp
);
3425 static void vlv_init_panel_power_sequencer(struct intel_encoder
*encoder
,
3426 const struct intel_crtc_state
*crtc_state
)
3428 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3429 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3430 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->base
.crtc
);
3432 lockdep_assert_held(&dev_priv
->pps_mutex
);
3434 WARN_ON(intel_dp
->active_pipe
!= INVALID_PIPE
);
3436 if (intel_dp
->pps_pipe
!= INVALID_PIPE
&&
3437 intel_dp
->pps_pipe
!= crtc
->pipe
) {
3439 * If another power sequencer was being used on this
3440 * port previously make sure to turn off vdd there while
3441 * we still have control of it.
3443 vlv_detach_power_sequencer(intel_dp
);
3447 * We may be stealing the power
3448 * sequencer from another port.
3450 vlv_steal_power_sequencer(dev_priv
, crtc
->pipe
);
3452 intel_dp
->active_pipe
= crtc
->pipe
;
3454 if (!intel_dp_is_edp(intel_dp
))
3457 /* now it's all ours */
3458 intel_dp
->pps_pipe
= crtc
->pipe
;
3460 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3461 pipe_name(intel_dp
->pps_pipe
), port_name(encoder
->port
));
3463 /* init power sequencer on this pipe and port */
3464 intel_dp_init_panel_power_sequencer(intel_dp
);
3465 intel_dp_init_panel_power_sequencer_registers(intel_dp
, true);
3468 static void vlv_pre_enable_dp(struct intel_encoder
*encoder
,
3469 const struct intel_crtc_state
*pipe_config
,
3470 const struct drm_connector_state
*conn_state
)
3472 vlv_phy_pre_encoder_enable(encoder
, pipe_config
);
3474 intel_enable_dp(encoder
, pipe_config
, conn_state
);
3477 static void vlv_dp_pre_pll_enable(struct intel_encoder
*encoder
,
3478 const struct intel_crtc_state
*pipe_config
,
3479 const struct drm_connector_state
*conn_state
)
3481 intel_dp_prepare(encoder
, pipe_config
);
3483 vlv_phy_pre_pll_enable(encoder
, pipe_config
);
3486 static void chv_pre_enable_dp(struct intel_encoder
*encoder
,
3487 const struct intel_crtc_state
*pipe_config
,
3488 const struct drm_connector_state
*conn_state
)
3490 chv_phy_pre_encoder_enable(encoder
, pipe_config
);
3492 intel_enable_dp(encoder
, pipe_config
, conn_state
);
3494 /* Second common lane will stay alive on its own now */
3495 chv_phy_release_cl2_override(encoder
);
3498 static void chv_dp_pre_pll_enable(struct intel_encoder
*encoder
,
3499 const struct intel_crtc_state
*pipe_config
,
3500 const struct drm_connector_state
*conn_state
)
3502 intel_dp_prepare(encoder
, pipe_config
);
3504 chv_phy_pre_pll_enable(encoder
, pipe_config
);
3507 static void chv_dp_post_pll_disable(struct intel_encoder
*encoder
,
3508 const struct intel_crtc_state
*old_crtc_state
,
3509 const struct drm_connector_state
*old_conn_state
)
3511 chv_phy_post_pll_disable(encoder
, old_crtc_state
);
3515 * Fetch AUX CH registers 0x202 - 0x207 which contain
3516 * link status information
3519 intel_dp_get_link_status(struct intel_dp
*intel_dp
, u8 link_status
[DP_LINK_STATUS_SIZE
])
3521 return drm_dp_dpcd_read(&intel_dp
->aux
, DP_LANE0_1_STATUS
, link_status
,
3522 DP_LINK_STATUS_SIZE
) == DP_LINK_STATUS_SIZE
;
3525 /* These are source-specific values. */
3527 intel_dp_voltage_max(struct intel_dp
*intel_dp
)
3529 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
3530 struct intel_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
;
3531 enum port port
= encoder
->port
;
3533 if (HAS_DDI(dev_priv
))
3534 return intel_ddi_dp_voltage_max(encoder
);
3535 else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
))
3536 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3537 else if (IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
)
3538 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3539 else if (HAS_PCH_CPT(dev_priv
) && port
!= PORT_A
)
3540 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3542 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3546 intel_dp_pre_emphasis_max(struct intel_dp
*intel_dp
, u8 voltage_swing
)
3548 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
3549 struct intel_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
;
3550 enum port port
= encoder
->port
;
3552 if (HAS_DDI(dev_priv
)) {
3553 return intel_ddi_dp_pre_emphasis_max(encoder
, voltage_swing
);
3554 } else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
3555 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3556 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3557 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3558 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3559 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3560 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3561 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3562 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3564 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3566 } else if (IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
) {
3567 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3569 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3570 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3571 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3572 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3574 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3577 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3578 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3579 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3580 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3581 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3582 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3583 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3584 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3586 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3591 static u32
vlv_signal_levels(struct intel_dp
*intel_dp
)
3593 struct intel_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
;
3594 unsigned long demph_reg_value
, preemph_reg_value
,
3595 uniqtranscale_reg_value
;
3596 u8 train_set
= intel_dp
->train_set
[0];
3598 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3599 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3600 preemph_reg_value
= 0x0004000;
3601 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3603 demph_reg_value
= 0x2B405555;
3604 uniqtranscale_reg_value
= 0x552AB83A;
3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3607 demph_reg_value
= 0x2B404040;
3608 uniqtranscale_reg_value
= 0x5548B83A;
3610 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3611 demph_reg_value
= 0x2B245555;
3612 uniqtranscale_reg_value
= 0x5560B83A;
3614 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3615 demph_reg_value
= 0x2B405555;
3616 uniqtranscale_reg_value
= 0x5598DA3A;
3622 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3623 preemph_reg_value
= 0x0002000;
3624 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3625 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3626 demph_reg_value
= 0x2B404040;
3627 uniqtranscale_reg_value
= 0x5552B83A;
3629 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3630 demph_reg_value
= 0x2B404848;
3631 uniqtranscale_reg_value
= 0x5580B83A;
3633 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3634 demph_reg_value
= 0x2B404040;
3635 uniqtranscale_reg_value
= 0x55ADDA3A;
3641 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3642 preemph_reg_value
= 0x0000000;
3643 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3645 demph_reg_value
= 0x2B305555;
3646 uniqtranscale_reg_value
= 0x5570B83A;
3648 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3649 demph_reg_value
= 0x2B2B4040;
3650 uniqtranscale_reg_value
= 0x55ADDA3A;
3656 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3657 preemph_reg_value
= 0x0006000;
3658 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3659 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3660 demph_reg_value
= 0x1B405555;
3661 uniqtranscale_reg_value
= 0x55ADDA3A;
3671 vlv_set_phy_signal_level(encoder
, demph_reg_value
, preemph_reg_value
,
3672 uniqtranscale_reg_value
, 0);
3677 static u32
chv_signal_levels(struct intel_dp
*intel_dp
)
3679 struct intel_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
;
3680 u32 deemph_reg_value
, margin_reg_value
;
3681 bool uniq_trans_scale
= false;
3682 u8 train_set
= intel_dp
->train_set
[0];
3684 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3685 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3686 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3687 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3688 deemph_reg_value
= 128;
3689 margin_reg_value
= 52;
3691 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3692 deemph_reg_value
= 128;
3693 margin_reg_value
= 77;
3695 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3696 deemph_reg_value
= 128;
3697 margin_reg_value
= 102;
3699 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3700 deemph_reg_value
= 128;
3701 margin_reg_value
= 154;
3702 uniq_trans_scale
= true;
3708 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3709 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3710 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3711 deemph_reg_value
= 85;
3712 margin_reg_value
= 78;
3714 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3715 deemph_reg_value
= 85;
3716 margin_reg_value
= 116;
3718 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3719 deemph_reg_value
= 85;
3720 margin_reg_value
= 154;
3726 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3727 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3728 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3729 deemph_reg_value
= 64;
3730 margin_reg_value
= 104;
3732 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3733 deemph_reg_value
= 64;
3734 margin_reg_value
= 154;
3740 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3741 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3742 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3743 deemph_reg_value
= 43;
3744 margin_reg_value
= 154;
3754 chv_set_phy_signal_level(encoder
, deemph_reg_value
,
3755 margin_reg_value
, uniq_trans_scale
);
static u32
g4x_signal_levels(u8 train_set)
{
	u32 signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}
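
/*
 * Illustrative worked example only, not part of the original driver:
 * train_set packs the requested voltage swing in its low bits and the
 * pre-emphasis level above it. Assuming DP_TRAIN_PRE_EMPHASIS_SHIFT is
 * 3, a train_set of 0x0a decodes to swing level 2 with pre-emphasis
 * level 1, which g4x_signal_levels() above would translate into
 * DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5. The concrete value and the
 * shift are assumptions used purely to show the decoding.
 */
static inline void example_decode_train_set(u8 train_set,
					    u8 *vswing, u8 *preemph)
{
	*vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
	*preemph = (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		   DP_TRAIN_PRE_EMPHASIS_SHIFT;
}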
3798 /* SNB CPU eDP voltage swing and pre-emphasis control */
3800 snb_cpu_edp_signal_levels(u8 train_set
)
3802 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3803 DP_TRAIN_PRE_EMPHASIS_MASK
);
3804 switch (signal_levels
) {
3805 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3806 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3807 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3808 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3809 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3810 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3811 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3812 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3813 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3814 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3815 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3816 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3817 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3818 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
3820 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3821 "0x%x\n", signal_levels
);
3822 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3826 /* IVB CPU eDP voltage swing and pre-emphasis control */
3828 ivb_cpu_edp_signal_levels(u8 train_set
)
3830 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3831 DP_TRAIN_PRE_EMPHASIS_MASK
);
3832 switch (signal_levels
) {
3833 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3834 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3835 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3836 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3837 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3838 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3840 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3841 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3842 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3843 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3845 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3846 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3847 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3848 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
3851 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3852 "0x%x\n", signal_levels
);
3853 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3858 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
)
3860 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
3861 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3862 enum port port
= intel_dig_port
->base
.port
;
3863 u32 signal_levels
, mask
= 0;
3864 u8 train_set
= intel_dp
->train_set
[0];
3866 if (IS_GEN9_LP(dev_priv
) || INTEL_GEN(dev_priv
) >= 10) {
3867 signal_levels
= bxt_signal_levels(intel_dp
);
3868 } else if (HAS_DDI(dev_priv
)) {
3869 signal_levels
= ddi_signal_levels(intel_dp
);
3870 mask
= DDI_BUF_EMP_MASK
;
3871 } else if (IS_CHERRYVIEW(dev_priv
)) {
3872 signal_levels
= chv_signal_levels(intel_dp
);
3873 } else if (IS_VALLEYVIEW(dev_priv
)) {
3874 signal_levels
= vlv_signal_levels(intel_dp
);
3875 } else if (IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
) {
3876 signal_levels
= ivb_cpu_edp_signal_levels(train_set
);
3877 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3878 } else if (IS_GEN(dev_priv
, 6) && port
== PORT_A
) {
3879 signal_levels
= snb_cpu_edp_signal_levels(train_set
);
3880 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3882 signal_levels
= g4x_signal_levels(train_set
);
3883 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3887 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3889 DRM_DEBUG_KMS("Using vswing level %d\n",
3890 train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
);
3891 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3892 (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) >>
3893 DP_TRAIN_PRE_EMPHASIS_SHIFT
);
3895 intel_dp
->DP
= (intel_dp
->DP
& ~mask
) | signal_levels
;
3897 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3898 POSTING_READ(intel_dp
->output_reg
);
3902 intel_dp_program_link_training_pattern(struct intel_dp
*intel_dp
,
3905 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3906 struct drm_i915_private
*dev_priv
=
3907 to_i915(intel_dig_port
->base
.base
.dev
);
3909 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
, dp_train_pat
);
3911 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3912 POSTING_READ(intel_dp
->output_reg
);
3915 void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3917 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
3918 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3919 enum port port
= intel_dig_port
->base
.port
;
3922 if (!HAS_DDI(dev_priv
))
3925 val
= I915_READ(DP_TP_CTL(port
));
3926 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3927 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3928 I915_WRITE(DP_TP_CTL(port
), val
);
3931 * On PORT_A we can have only eDP in SST mode. There the only reason
3932 * we need to set idle transmission mode is to work around a HW issue
3933 * where we enable the pipe while not in idle link-training mode.
3934 * In this case there is requirement to wait for a minimum number of
3935 * idle patterns to be sent.
3940 if (intel_wait_for_register(dev_priv
,DP_TP_STATUS(port
),
3941 DP_TP_STATUS_IDLE_DONE
,
3942 DP_TP_STATUS_IDLE_DONE
,
3944 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3948 intel_dp_link_down(struct intel_encoder
*encoder
,
3949 const struct intel_crtc_state
*old_crtc_state
)
3951 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3952 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
3953 struct intel_crtc
*crtc
= to_intel_crtc(old_crtc_state
->base
.crtc
);
3954 enum port port
= encoder
->port
;
3955 u32 DP
= intel_dp
->DP
;
3957 if (WARN_ON(HAS_DDI(dev_priv
)))
3960 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3963 DRM_DEBUG_KMS("\n");
3965 if ((IS_IVYBRIDGE(dev_priv
) && port
== PORT_A
) ||
3966 (HAS_PCH_CPT(dev_priv
) && port
!= PORT_A
)) {
3967 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3968 DP
|= DP_LINK_TRAIN_PAT_IDLE_CPT
;
3970 DP
&= ~DP_LINK_TRAIN_MASK
;
3971 DP
|= DP_LINK_TRAIN_PAT_IDLE
;
3973 I915_WRITE(intel_dp
->output_reg
, DP
);
3974 POSTING_READ(intel_dp
->output_reg
);
3976 DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
3977 I915_WRITE(intel_dp
->output_reg
, DP
);
3978 POSTING_READ(intel_dp
->output_reg
);
3981 * HW workaround for IBX, we need to move the port
3982 * to transcoder A after disabling it to allow the
3983 * matching HDMI port to be enabled on transcoder A.
3985 if (HAS_PCH_IBX(dev_priv
) && crtc
->pipe
== PIPE_B
&& port
!= PORT_A
) {
3987 * We get CPU/PCH FIFO underruns on the other pipe when
3988 * doing the workaround. Sweep them under the rug.
3990 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3991 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3993 /* always enable with pattern 1 (as per spec) */
3994 DP
&= ~(DP_PIPE_SEL_MASK
| DP_LINK_TRAIN_MASK
);
3995 DP
|= DP_PORT_EN
| DP_PIPE_SEL(PIPE_A
) |
3996 DP_LINK_TRAIN_PAT_1
;
3997 I915_WRITE(intel_dp
->output_reg
, DP
);
3998 POSTING_READ(intel_dp
->output_reg
);
4001 I915_WRITE(intel_dp
->output_reg
, DP
);
4002 POSTING_READ(intel_dp
->output_reg
);
4004 intel_wait_for_vblank_if_active(dev_priv
, PIPE_A
);
4005 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
4006 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
4009 msleep(intel_dp
->panel_power_down_delay
);
4013 if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
4014 intel_wakeref_t wakeref
;
4016 with_pps_lock(intel_dp
, wakeref
)
4017 intel_dp
->active_pipe
= INVALID_PIPE
;
static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
	u8 dpcd_ext[6];

	/*
	 * Prior to DP1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
			     &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
		DRM_ERROR("DPCD failed read at extended capabilities\n");
		return;
	}

	if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
		return;
	}

	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;

	DRM_DEBUG_KMS("Base DPCD: %*ph\n",
		      (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}
static bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			DRM_ERROR("Failed to read DPCD register 0x%x\n",
				  DP_DSC_SUPPORT);

		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
			      (int)sizeof(intel_dp->dsc_dpcd),
			      intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			DRM_ERROR("Failed to read FEC DPCD register\n");

		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
	}
}
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int)sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!intel_dp_is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for sink count, which is
	 * why we don't bother reading it here or in intel_edp_init_dpcd().
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
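
/*
 * Note on DP_GET_SINK_COUNT() above (illustrative, not from the original
 * code): in DP 1.2+ the SINK_COUNT field is split across bits 5:0 and
 * bit 7 of the DP_SINK_COUNT register, so a raw value of 0x81 decodes as
 * ((0x81 & 0x80) >> 1) | (0x81 & 0x3f) = 64 | 1 = 65 sinks.
 */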
static bool
intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
	u8 mstm_cap;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;

	return mstm_cap & DP_MST_CAP;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	return i915_modparams.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
		      port_name(encoder->port), yesno(intel_dp->can_mst),
		      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}
u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
				int mode_clock, int mode_hdisplay)
{
	u16 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
	 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8 *
			  DP_DSC_FEC_OVERHEAD_FACTOR) /
		mode_clock;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
		mode_hdisplay;

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;

	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);
	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}

	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_NAK;
	return test_result;
}

static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				for (retry = 0; retry < 3; retry++) {
					int wret;

					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3)
						break;
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it controls the main link: it keeps
	 * enabling and disabling frame transmission, so a retrain attempted
	 * here could fail because the link may or may not be up, or could mix
	 * training patterns and frame data at the same time. Also, when
	 * exiting PSR the HW retrains the link anyway, fixing any link status
	 * error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
4663 int intel_dp_retrain_link(struct intel_encoder
*encoder
,
4664 struct drm_modeset_acquire_ctx
*ctx
)
4666 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
4667 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
4668 struct intel_connector
*connector
= intel_dp
->attached_connector
;
4669 struct drm_connector_state
*conn_state
;
4670 struct intel_crtc_state
*crtc_state
;
4671 struct intel_crtc
*crtc
;
4674 /* FIXME handle the MST connectors as well */
4676 if (!connector
|| connector
->base
.status
!= connector_status_connected
)
4679 ret
= drm_modeset_lock(&dev_priv
->drm
.mode_config
.connection_mutex
,
4684 conn_state
= connector
->base
.state
;
4686 crtc
= to_intel_crtc(conn_state
->crtc
);
4690 ret
= drm_modeset_lock(&crtc
->base
.mutex
, ctx
);
4694 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
4696 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state
));
4698 if (!crtc_state
->base
.active
)
4701 if (conn_state
->commit
&&
4702 !try_wait_for_completion(&conn_state
->commit
->hw_done
))
4705 if (!intel_dp_needs_link_retrain(intel_dp
))
4708 /* Suppress underruns caused by re-training */
4709 intel_set_cpu_fifo_underrun_reporting(dev_priv
, crtc
->pipe
, false);
4710 if (crtc_state
->has_pch_encoder
)
4711 intel_set_pch_fifo_underrun_reporting(dev_priv
,
4712 intel_crtc_pch_transcoder(crtc
), false);
4714 intel_dp_start_link_train(intel_dp
);
4715 intel_dp_stop_link_train(intel_dp
);
4717 /* Keep underrun reporting disabled until things are stable */
4718 intel_wait_for_vblank(dev_priv
, crtc
->pipe
);
4720 intel_set_cpu_fifo_underrun_reporting(dev_priv
, crtc
->pipe
, true);
4721 if (crtc_state
->has_pch_encoder
)
4722 intel_set_pch_fifo_underrun_reporting(dev_priv
,
4723 intel_crtc_pch_transcoder(crtc
), true);
4729 * If display is now connected check links status,
4730 * there has been known issues of link loss triggering
4733 * Some sinks (eg. ASUS PB287Q) seem to perform some
4734 * weird HPD ping pong during modesets. So we can apparently
4735 * end up with HPD going low during a modeset, and then
4736 * going back up soon after. And once that happens we must
4737 * retrain the link to get a picture. That's in case no
4738 * userspace component reacted to intermittent HPD dip.
4740 static bool intel_dp_hotplug(struct intel_encoder
*encoder
,
4741 struct intel_connector
*connector
)
4743 struct drm_modeset_acquire_ctx ctx
;
4747 changed
= intel_encoder_hotplug(encoder
, connector
);
4749 drm_modeset_acquire_init(&ctx
, 0);
4752 ret
= intel_dp_retrain_link(encoder
, &ctx
);
4754 if (ret
== -EDEADLK
) {
4755 drm_modeset_backoff(&ctx
);
4762 drm_modeset_drop_locks(&ctx
);
4763 drm_modeset_acquire_fini(&ctx
);
4764 WARN(ret
, "Acquiring modeset locks failed with %i\n", ret
);
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
}
4793 * According to DP spec
4796 * 2. Configure link according to Receiver Capabilities
4797 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4798 * 4. Check link status on receipt of hot-plug interrupt
4800 * intel_dp_short_pulse - handles short pulse interrupts
4801 * when full detection is not required.
4802 * Returns %true if short pulse is handled and full detection
4803 * is NOT required and %false otherwise.
4806 intel_dp_short_pulse(struct intel_dp
*intel_dp
)
4808 struct drm_i915_private
*dev_priv
= dp_to_i915(intel_dp
);
4809 u8 old_sink_count
= intel_dp
->sink_count
;
4813 * Clearing compliance test variables to allow capturing
4814 * of values for next automated test request.
4816 memset(&intel_dp
->compliance
, 0, sizeof(intel_dp
->compliance
));
4819 * Now read the DPCD to see if it's actually running
4820 * If the current value of sink count doesn't match with
4821 * the value that was stored earlier or dpcd read failed
4822 * we need to do full detection
4824 ret
= intel_dp_get_dpcd(intel_dp
);
4826 if ((old_sink_count
!= intel_dp
->sink_count
) || !ret
) {
4827 /* No need to proceed if we are going to do full detect */
4831 intel_dp_check_service_irq(intel_dp
);
4833 /* Handle CEC interrupts, if any */
4834 drm_dp_cec_irq(&intel_dp
->aux
);
4836 /* defer to the hotplug work for link retraining if needed */
4837 if (intel_dp_needs_link_retrain(intel_dp
))
4840 intel_psr_short_pulse(intel_dp
);
4842 if (intel_dp
->compliance
.test_type
== DP_TEST_LINK_TRAINING
) {
4843 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4844 /* Send a Hotplug Uevent to userspace to start modeset */
4845 drm_kms_helper_hotplug_event(&dev_priv
->drm
);
4851 /* XXX this is probably wrong for multiple downstream ports */
4852 static enum drm_connector_status
4853 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4855 struct intel_lspcon
*lspcon
= dp_to_lspcon(intel_dp
);
4856 u8
*dpcd
= intel_dp
->dpcd
;
4860 lspcon_resume(lspcon
);
4862 if (!intel_dp_get_dpcd(intel_dp
))
4863 return connector_status_disconnected
;
4865 if (intel_dp_is_edp(intel_dp
))
4866 return connector_status_connected
;
4868 /* if there's no downstream port, we're done */
4869 if (!drm_dp_is_branch(dpcd
))
4870 return connector_status_connected
;
4872 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4873 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4874 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4876 return intel_dp
->sink_count
?
4877 connector_status_connected
: connector_status_disconnected
;
4880 if (intel_dp_can_mst(intel_dp
))
4881 return connector_status_connected
;
4883 /* If no HPD, poke DDC gently */
4884 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4885 return connector_status_connected
;
4887 /* Well we tried, say unknown for unreliable port types */
4888 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4889 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4890 if (type
== DP_DS_PORT_TYPE_VGA
||
4891 type
== DP_DS_PORT_TYPE_NON_EDID
)
4892 return connector_status_unknown
;
4894 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4895 DP_DWN_STRM_PORT_TYPE_MASK
;
4896 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4897 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4898 return connector_status_unknown
;
4901 /* Anything else is out of spec, warn and ignore */
4902 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4903 return connector_status_disconnected
;
4906 static enum drm_connector_status
4907 edp_detect(struct intel_dp
*intel_dp
)
4909 return connector_status_connected
;
4912 static bool ibx_digital_port_connected(struct intel_encoder
*encoder
)
4914 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
4917 switch (encoder
->hpd_pin
) {
4919 bit
= SDE_PORTB_HOTPLUG
;
4922 bit
= SDE_PORTC_HOTPLUG
;
4925 bit
= SDE_PORTD_HOTPLUG
;
4928 MISSING_CASE(encoder
->hpd_pin
);
4932 return I915_READ(SDEISR
) & bit
;
4935 static bool cpt_digital_port_connected(struct intel_encoder
*encoder
)
4937 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
4940 switch (encoder
->hpd_pin
) {
4942 bit
= SDE_PORTB_HOTPLUG_CPT
;
4945 bit
= SDE_PORTC_HOTPLUG_CPT
;
4948 bit
= SDE_PORTD_HOTPLUG_CPT
;
4951 MISSING_CASE(encoder
->hpd_pin
);
4955 return I915_READ(SDEISR
) & bit
;
4958 static bool spt_digital_port_connected(struct intel_encoder
*encoder
)
4960 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
4963 switch (encoder
->hpd_pin
) {
4965 bit
= SDE_PORTA_HOTPLUG_SPT
;
4968 bit
= SDE_PORTE_HOTPLUG_SPT
;
4971 return cpt_digital_port_connected(encoder
);
4974 return I915_READ(SDEISR
) & bit
;
4977 static bool g4x_digital_port_connected(struct intel_encoder
*encoder
)
4979 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
4982 switch (encoder
->hpd_pin
) {
4984 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4987 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4990 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4993 MISSING_CASE(encoder
->hpd_pin
);
4997 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
5000 static bool gm45_digital_port_connected(struct intel_encoder
*encoder
)
5002 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5005 switch (encoder
->hpd_pin
) {
5007 bit
= PORTB_HOTPLUG_LIVE_STATUS_GM45
;
5010 bit
= PORTC_HOTPLUG_LIVE_STATUS_GM45
;
5013 bit
= PORTD_HOTPLUG_LIVE_STATUS_GM45
;
5016 MISSING_CASE(encoder
->hpd_pin
);
5020 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
5023 static bool ilk_digital_port_connected(struct intel_encoder
*encoder
)
5025 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5027 if (encoder
->hpd_pin
== HPD_PORT_A
)
5028 return I915_READ(DEISR
) & DE_DP_A_HOTPLUG
;
5030 return ibx_digital_port_connected(encoder
);
5033 static bool snb_digital_port_connected(struct intel_encoder
*encoder
)
5035 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5037 if (encoder
->hpd_pin
== HPD_PORT_A
)
5038 return I915_READ(DEISR
) & DE_DP_A_HOTPLUG
;
5040 return cpt_digital_port_connected(encoder
);
5043 static bool ivb_digital_port_connected(struct intel_encoder
*encoder
)
5045 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5047 if (encoder
->hpd_pin
== HPD_PORT_A
)
5048 return I915_READ(DEISR
) & DE_DP_A_HOTPLUG_IVB
;
5050 return cpt_digital_port_connected(encoder
);
5053 static bool bdw_digital_port_connected(struct intel_encoder
*encoder
)
5055 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5057 if (encoder
->hpd_pin
== HPD_PORT_A
)
5058 return I915_READ(GEN8_DE_PORT_ISR
) & GEN8_PORT_DP_A_HOTPLUG
;
5060 return cpt_digital_port_connected(encoder
);
5063 static bool bxt_digital_port_connected(struct intel_encoder
*encoder
)
5065 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5068 switch (encoder
->hpd_pin
) {
5070 bit
= BXT_DE_PORT_HP_DDIA
;
5073 bit
= BXT_DE_PORT_HP_DDIB
;
5076 bit
= BXT_DE_PORT_HP_DDIC
;
5079 MISSING_CASE(encoder
->hpd_pin
);
5083 return I915_READ(GEN8_DE_PORT_ISR
) & bit
;
5086 static bool icl_combo_port_connected(struct drm_i915_private
*dev_priv
,
5087 struct intel_digital_port
*intel_dig_port
)
5089 enum port port
= intel_dig_port
->base
.port
;
5091 return I915_READ(SDEISR
) & SDE_DDI_HOTPLUG_ICP(port
);
5094 static const char *tc_type_name(enum tc_port_type type
)
5096 static const char * const names
[] = {
5097 [TC_PORT_UNKNOWN
] = "unknown",
5098 [TC_PORT_LEGACY
] = "legacy",
5099 [TC_PORT_TYPEC
] = "typec",
5100 [TC_PORT_TBT
] = "tbt",
5103 if (WARN_ON(type
>= ARRAY_SIZE(names
)))
5104 type
= TC_PORT_UNKNOWN
;
5109 static void icl_update_tc_port_type(struct drm_i915_private
*dev_priv
,
5110 struct intel_digital_port
*intel_dig_port
,
5111 bool is_legacy
, bool is_typec
, bool is_tbt
)
5113 enum port port
= intel_dig_port
->base
.port
;
5114 enum tc_port_type old_type
= intel_dig_port
->tc_type
;
5116 WARN_ON(is_legacy
+ is_typec
+ is_tbt
!= 1);
5119 intel_dig_port
->tc_type
= TC_PORT_LEGACY
;
5121 intel_dig_port
->tc_type
= TC_PORT_TYPEC
;
5123 intel_dig_port
->tc_type
= TC_PORT_TBT
;
5127 /* Types are not supposed to be changed at runtime. */
5128 WARN_ON(old_type
!= TC_PORT_UNKNOWN
&&
5129 old_type
!= intel_dig_port
->tc_type
);
5131 if (old_type
!= intel_dig_port
->tc_type
)
5132 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port
),
5133 tc_type_name(intel_dig_port
->tc_type
));
5137 * This function implements the first part of the Connect Flow described by our
5138 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5139 * lanes, EDID, etc) is done as needed in the typical places.
5141 * Unlike the other ports, type-C ports are not available to use as soon as we
5142 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5143 * display, USB, etc. As a result, handshaking through FIA is required around
5144 * connect and disconnect to cleanly transfer ownership with the controller and
5145 * set the type-C power state.
5147 * We could opt to only do the connect flow when we actually try to use the AUX
5148 * channels or do a modeset, then immediately run the disconnect flow after
5149 * usage, but there are some implications on this for a dynamic environment:
5150 * things may go away or change behind our backs. So for now our driver is
5151 * always trying to acquire ownership of the controller as soon as it gets an
5152 * interrupt (or polls state and sees a port is connected) and only gives it
5153 * back when it sees a disconnect. Implementation of a more fine-grained model
5154 * will require a lot of coordination with user space and thorough testing for
5155 * the extra possible cases.
5157 static bool icl_tc_phy_connect(struct drm_i915_private
*dev_priv
,
5158 struct intel_digital_port
*dig_port
)
5160 enum tc_port tc_port
= intel_port_to_tc(dev_priv
, dig_port
->base
.port
);
5163 if (dig_port
->tc_type
!= TC_PORT_LEGACY
&&
5164 dig_port
->tc_type
!= TC_PORT_TYPEC
)
5167 val
= I915_READ(PORT_TX_DFLEXDPPMS
);
5168 if (!(val
& DP_PHY_MODE_STATUS_COMPLETED(tc_port
))) {
5169 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port
);
5170 WARN_ON(dig_port
->tc_legacy_port
);
5175 * This function may be called many times in a row without an HPD event
5176 * in between, so try to avoid the write when we can.
5178 val
= I915_READ(PORT_TX_DFLEXDPCSSS
);
5179 if (!(val
& DP_PHY_MODE_STATUS_NOT_SAFE(tc_port
))) {
5180 val
|= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port
);
5181 I915_WRITE(PORT_TX_DFLEXDPCSSS
, val
);
5185 * Now we have to re-check the live state, in case the port recently
5186 * became disconnected. Not necessary for legacy mode.
5188 if (dig_port
->tc_type
== TC_PORT_TYPEC
&&
5189 !(I915_READ(PORT_TX_DFLEXDPSP
) & TC_LIVE_STATE_TC(tc_port
))) {
5190 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port
);
5191 icl_tc_phy_disconnect(dev_priv
, dig_port
);
5199 * See the comment at the connect function. This implements the Disconnect
5202 void icl_tc_phy_disconnect(struct drm_i915_private
*dev_priv
,
5203 struct intel_digital_port
*dig_port
)
5205 enum tc_port tc_port
= intel_port_to_tc(dev_priv
, dig_port
->base
.port
);
5207 if (dig_port
->tc_type
== TC_PORT_UNKNOWN
)
5211 * TBT disconnection flow is read the live status, what was done in
5214 if (dig_port
->tc_type
== TC_PORT_TYPEC
||
5215 dig_port
->tc_type
== TC_PORT_LEGACY
) {
5218 val
= I915_READ(PORT_TX_DFLEXDPCSSS
);
5219 val
&= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port
);
5220 I915_WRITE(PORT_TX_DFLEXDPCSSS
, val
);
5223 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5224 port_name(dig_port
->base
.port
),
5225 tc_type_name(dig_port
->tc_type
));
5227 dig_port
->tc_type
= TC_PORT_UNKNOWN
;
5231 * The type-C ports are different because even when they are connected, they may
5232 * not be available/usable by the graphics driver: see the comment on
5233 * icl_tc_phy_connect(). So in our driver instead of adding the additional
5234 * concept of "usable" and make everything check for "connected and usable" we
5235 * define a port as "connected" when it is not only connected, but also when it
5236 * is usable by the rest of the driver. That maintains the old assumption that
5237 * connected ports are usable, and avoids exposing to the users objects they
5240 static bool icl_tc_port_connected(struct drm_i915_private
*dev_priv
,
5241 struct intel_digital_port
*intel_dig_port
)
5243 enum port port
= intel_dig_port
->base
.port
;
5244 enum tc_port tc_port
= intel_port_to_tc(dev_priv
, port
);
5245 bool is_legacy
, is_typec
, is_tbt
;
5249 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5250 * legacy. Treat the port as legacy from now on.
5252 if (WARN_ON(!intel_dig_port
->tc_legacy_port
&&
5253 I915_READ(SDEISR
) & SDE_TC_HOTPLUG_ICP(tc_port
)))
5254 intel_dig_port
->tc_legacy_port
= true;
5255 is_legacy
= intel_dig_port
->tc_legacy_port
;
5258 * The spec says we shouldn't be using the ISR bits for detecting
5259 * between TC and TBT. We should use DFLEXDPSP.
5261 dpsp
= I915_READ(PORT_TX_DFLEXDPSP
);
5262 is_typec
= dpsp
& TC_LIVE_STATE_TC(tc_port
);
5263 is_tbt
= dpsp
& TC_LIVE_STATE_TBT(tc_port
);
5265 if (!is_legacy
&& !is_typec
&& !is_tbt
) {
5266 icl_tc_phy_disconnect(dev_priv
, intel_dig_port
);
5271 icl_update_tc_port_type(dev_priv
, intel_dig_port
, is_legacy
, is_typec
,
5274 if (!icl_tc_phy_connect(dev_priv
, intel_dig_port
))
5280 static bool icl_digital_port_connected(struct intel_encoder
*encoder
)
5282 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5283 struct intel_digital_port
*dig_port
= enc_to_dig_port(&encoder
->base
);
5285 if (intel_port_is_combophy(dev_priv
, encoder
->port
))
5286 return icl_combo_port_connected(dev_priv
, dig_port
);
5287 else if (intel_port_is_tc(dev_priv
, encoder
->port
))
5288 return icl_tc_port_connected(dev_priv
, dig_port
);
5290 MISSING_CASE(encoder
->hpd_pin
);
5296 * intel_digital_port_connected - is the specified port connected?
5297 * @encoder: intel_encoder
5299 * In cases where there's a connector physically connected but it can't be used
5300 * by our hardware we also return false, since the rest of the driver should
5301 * pretty much treat the port as disconnected. This is relevant for type-C
5302 * (starting on ICL) where there's ownership involved.
5304 * Return %true if port is connected, %false otherwise.
5306 bool intel_digital_port_connected(struct intel_encoder
*encoder
)
5308 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
5310 if (HAS_GMCH(dev_priv
)) {
5311 if (IS_GM45(dev_priv
))
5312 return gm45_digital_port_connected(encoder
);
5314 return g4x_digital_port_connected(encoder
);
5317 if (INTEL_GEN(dev_priv
) >= 11)
5318 return icl_digital_port_connected(encoder
);
5319 else if (IS_GEN(dev_priv
, 10) || IS_GEN9_BC(dev_priv
))
5320 return spt_digital_port_connected(encoder
);
5321 else if (IS_GEN9_LP(dev_priv
))
5322 return bxt_digital_port_connected(encoder
);
5323 else if (IS_GEN(dev_priv
, 8))
5324 return bdw_digital_port_connected(encoder
);
5325 else if (IS_GEN(dev_priv
, 7))
5326 return ivb_digital_port_connected(encoder
);
5327 else if (IS_GEN(dev_priv
, 6))
5328 return snb_digital_port_connected(encoder
);
5329 else if (IS_GEN(dev_priv
, 5))
5330 return ilk_digital_port_connected(encoder
);
5332 MISSING_CASE(INTEL_GEN(dev_priv
));
5336 static struct edid
*
5337 intel_dp_get_edid(struct intel_dp
*intel_dp
)
5339 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
5341 /* use cached edid if we have one */
5342 if (intel_connector
->edid
) {
5344 if (IS_ERR(intel_connector
->edid
))
5347 return drm_edid_duplicate(intel_connector
->edid
);
5349 return drm_get_edid(&intel_connector
->base
,
5350 &intel_dp
->aux
.ddc
);
5354 intel_dp_set_edid(struct intel_dp
*intel_dp
)
5356 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
5359 intel_dp_unset_edid(intel_dp
);
5360 edid
= intel_dp_get_edid(intel_dp
);
5361 intel_connector
->detect_edid
= edid
;
5363 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
5364 drm_dp_cec_set_edid(&intel_dp
->aux
, edid
);
5368 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
5370 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
5372 drm_dp_cec_unset_edid(&intel_dp
->aux
);
5373 kfree(intel_connector
->detect_edid
);
5374 intel_connector
->detect_edid
= NULL
;
5376 intel_dp
->has_audio
= false;
5380 intel_dp_detect(struct drm_connector
*connector
,
5381 struct drm_modeset_acquire_ctx
*ctx
,
5384 struct drm_i915_private
*dev_priv
= to_i915(connector
->dev
);
5385 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
5386 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5387 struct intel_encoder
*encoder
= &dig_port
->base
;
5388 enum drm_connector_status status
;
5389 enum intel_display_power_domain aux_domain
=
5390 intel_aux_power_domain(dig_port
);
5391 intel_wakeref_t wakeref
;
5393 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5394 connector
->base
.id
, connector
->name
);
5395 WARN_ON(!drm_modeset_is_locked(&dev_priv
->drm
.mode_config
.connection_mutex
));
5397 wakeref
= intel_display_power_get(dev_priv
, aux_domain
);
5399 /* Can't disconnect eDP */
5400 if (intel_dp_is_edp(intel_dp
))
5401 status
= edp_detect(intel_dp
);
5402 else if (intel_digital_port_connected(encoder
))
5403 status
= intel_dp_detect_dpcd(intel_dp
);
5405 status
= connector_status_disconnected
;
5407 if (status
== connector_status_disconnected
) {
5408 memset(&intel_dp
->compliance
, 0, sizeof(intel_dp
->compliance
));
5409 memset(intel_dp
->dsc_dpcd
, 0, sizeof(intel_dp
->dsc_dpcd
));
5411 if (intel_dp
->is_mst
) {
5412 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5414 intel_dp
->mst_mgr
.mst_state
);
5415 intel_dp
->is_mst
= false;
5416 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
,
5423 if (intel_dp
->reset_link_params
) {
5424 /* Initial max link lane count */
5425 intel_dp
->max_link_lane_count
= intel_dp_max_common_lane_count(intel_dp
);
5427 /* Initial max link rate */
5428 intel_dp
->max_link_rate
= intel_dp_max_common_rate(intel_dp
);
5430 intel_dp
->reset_link_params
= false;
5433 intel_dp_print_rates(intel_dp
);
5435 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5436 if (INTEL_GEN(dev_priv
) >= 11)
5437 intel_dp_get_dsc_sink_cap(intel_dp
);
5439 drm_dp_read_desc(&intel_dp
->aux
, &intel_dp
->desc
,
5440 drm_dp_is_branch(intel_dp
->dpcd
));
5442 intel_dp_configure_mst(intel_dp
);
5444 if (intel_dp
->is_mst
) {
5446 * If we are in MST mode then this connector
5447 * won't appear connected or have anything
5450 status
= connector_status_disconnected
;
5455 * Some external monitors do not signal loss of link synchronization
5456 * with an IRQ_HPD, so force a link status check.
5458 if (!intel_dp_is_edp(intel_dp
)) {
5461 ret
= intel_dp_retrain_link(encoder
, ctx
);
5463 intel_display_power_put(dev_priv
, aux_domain
, wakeref
);
5469 * Clearing NACK and defer counts to get their exact values
5470 * while reading EDID which are required by Compliance tests
5471 * 4.2.2.4 and 4.2.2.5
5473 intel_dp
->aux
.i2c_nack_count
= 0;
5474 intel_dp
->aux
.i2c_defer_count
= 0;
5476 intel_dp_set_edid(intel_dp
);
5477 if (intel_dp_is_edp(intel_dp
) ||
5478 to_intel_connector(connector
)->detect_edid
)
5479 status
= connector_status_connected
;
5481 intel_dp_check_service_irq(intel_dp
);
5484 if (status
!= connector_status_connected
&& !intel_dp
->is_mst
)
5485 intel_dp_unset_edid(intel_dp
);
5487 intel_display_power_put(dev_priv
, aux_domain
, wakeref
);
5492 intel_dp_force(struct drm_connector
*connector
)
5494 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
5495 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5496 struct intel_encoder
*intel_encoder
= &dig_port
->base
;
5497 struct drm_i915_private
*dev_priv
= to_i915(intel_encoder
->base
.dev
);
5498 enum intel_display_power_domain aux_domain
=
5499 intel_aux_power_domain(dig_port
);
5500 intel_wakeref_t wakeref
;
5502 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5503 connector
->base
.id
, connector
->name
);
5504 intel_dp_unset_edid(intel_dp
);
5506 if (connector
->status
!= connector_status_connected
)
5509 wakeref
= intel_display_power_get(dev_priv
, aux_domain
);
5511 intel_dp_set_edid(intel_dp
);
5513 intel_display_power_put(dev_priv
, aux_domain
, wakeref
);
5516 static int intel_dp_get_modes(struct drm_connector
*connector
)
5518 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
5521 edid
= intel_connector
->detect_edid
;
5523 int ret
= intel_connector_update_modes(connector
, edid
);
5528 /* if eDP has no EDID, fall back to fixed mode */
5529 if (intel_dp_is_edp(intel_attached_dp(connector
)) &&
5530 intel_connector
->panel
.fixed_mode
) {
5531 struct drm_display_mode
*mode
;
5533 mode
= drm_mode_duplicate(connector
->dev
,
5534 intel_connector
->panel
.fixed_mode
);
5536 drm_mode_probed_add(connector
, mode
);
5545 intel_dp_connector_register(struct drm_connector
*connector
)
5547 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
5548 struct drm_device
*dev
= connector
->dev
;
5551 ret
= intel_connector_register(connector
);
5555 i915_debugfs_connector_add(connector
);
5557 DRM_DEBUG_KMS("registering %s bus for %s\n",
5558 intel_dp
->aux
.name
, connector
->kdev
->kobj
.name
);
5560 intel_dp
->aux
.dev
= connector
->kdev
;
5561 ret
= drm_dp_aux_register(&intel_dp
->aux
);
5563 drm_dp_cec_register_connector(&intel_dp
->aux
,
5564 connector
->name
, dev
->dev
);
5569 intel_dp_connector_unregister(struct drm_connector
*connector
)
5571 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
5573 drm_dp_cec_unregister_connector(&intel_dp
->aux
);
5574 drm_dp_aux_unregister(&intel_dp
->aux
);
5575 intel_connector_unregister(connector
);
5578 void intel_dp_encoder_flush_work(struct drm_encoder
*encoder
)
5580 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
5581 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5583 intel_dp_mst_encoder_cleanup(intel_dig_port
);
5584 if (intel_dp_is_edp(intel_dp
)) {
5585 intel_wakeref_t wakeref
;
5587 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5589 * vdd might still be enabled do to the delayed vdd off.
5590 * Make sure vdd is actually turned off here.
5592 with_pps_lock(intel_dp
, wakeref
)
5593 edp_panel_vdd_off_sync(intel_dp
);
5595 if (intel_dp
->edp_notifier
.notifier_call
) {
5596 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
5597 intel_dp
->edp_notifier
.notifier_call
= NULL
;
5601 intel_dp_aux_fini(intel_dp
);
5604 static void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
5606 intel_dp_encoder_flush_work(encoder
);
5608 drm_encoder_cleanup(encoder
);
5609 kfree(enc_to_dig_port(encoder
));
5612 void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
5614 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
5615 intel_wakeref_t wakeref
;
5617 if (!intel_dp_is_edp(intel_dp
))
5621 * vdd might still be enabled do to the delayed vdd off.
5622 * Make sure vdd is actually turned off here.
5624 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5625 with_pps_lock(intel_dp
, wakeref
)
5626 edp_panel_vdd_off_sync(intel_dp
);
5629 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp
*hdcp
, int timeout
)
5633 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5634 ret
= wait_event_interruptible_timeout(hdcp
->cp_irq_queue
, C
,
5635 msecs_to_jiffies(timeout
));
5638 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5642 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port
*intel_dig_port
,
5645 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_dig_port
->base
.base
);
5646 static const struct drm_dp_aux_msg msg
= {
5647 .request
= DP_AUX_NATIVE_WRITE
,
5648 .address
= DP_AUX_HDCP_AKSV
,
5649 .size
= DRM_HDCP_KSV_LEN
,
5651 u8 txbuf
[HEADER_SIZE
+ DRM_HDCP_KSV_LEN
] = {}, rxbuf
[2], reply
= 0;
5655 /* Output An first, that's easy */
5656 dpcd_ret
= drm_dp_dpcd_write(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_AN
,
5657 an
, DRM_HDCP_AN_LEN
);
5658 if (dpcd_ret
!= DRM_HDCP_AN_LEN
) {
5659 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5661 return dpcd_ret
>= 0 ? -EIO
: dpcd_ret
;
5665 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5666 * order to get it on the wire, we need to create the AUX header as if
5667 * we were writing the data, and then tickle the hardware to output the
5668 * data once the header is sent out.
5670 intel_dp_aux_header(txbuf
, &msg
);
5672 ret
= intel_dp_aux_xfer(intel_dp
, txbuf
, HEADER_SIZE
+ msg
.size
,
5673 rxbuf
, sizeof(rxbuf
),
5674 DP_AUX_CH_CTL_AUX_AKSV_SELECT
);
5676 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret
);
5678 } else if (ret
== 0) {
5679 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5683 reply
= (rxbuf
[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK
;
5684 if (reply
!= DP_AUX_NATIVE_REPLY_ACK
) {
5685 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5692 static int intel_dp_hdcp_read_bksv(struct intel_digital_port
*intel_dig_port
,
5696 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_BKSV
, bksv
,
5698 if (ret
!= DRM_HDCP_KSV_LEN
) {
5699 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret
);
5700 return ret
>= 0 ? -EIO
: ret
;
5705 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port
*intel_dig_port
,
5710 * For some reason the HDMI and DP HDCP specs call this register
5711 * definition by different names. In the HDMI spec, it's called BSTATUS,
5712 * but in DP it's called BINFO.
5714 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_BINFO
,
5715 bstatus
, DRM_HDCP_BSTATUS_LEN
);
5716 if (ret
!= DRM_HDCP_BSTATUS_LEN
) {
5717 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret
);
5718 return ret
>= 0 ? -EIO
: ret
;
5724 int intel_dp_hdcp_read_bcaps(struct intel_digital_port
*intel_dig_port
,
5729 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_BCAPS
,
5732 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret
);
5733 return ret
>= 0 ? -EIO
: ret
;
5740 int intel_dp_hdcp_repeater_present(struct intel_digital_port
*intel_dig_port
,
5741 bool *repeater_present
)
5746 ret
= intel_dp_hdcp_read_bcaps(intel_dig_port
, &bcaps
);
5750 *repeater_present
= bcaps
& DP_BCAPS_REPEATER_PRESENT
;
5755 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port
*intel_dig_port
,
5759 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_RI_PRIME
,
5760 ri_prime
, DRM_HDCP_RI_LEN
);
5761 if (ret
!= DRM_HDCP_RI_LEN
) {
5762 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret
);
5763 return ret
>= 0 ? -EIO
: ret
;
5769 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port
*intel_dig_port
,
5774 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_BSTATUS
,
5777 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret
);
5778 return ret
>= 0 ? -EIO
: ret
;
5780 *ksv_ready
= bstatus
& DP_BSTATUS_READY
;
5785 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port
*intel_dig_port
,
5786 int num_downstream
, u8
*ksv_fifo
)
5791 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5792 for (i
= 0; i
< num_downstream
; i
+= 3) {
5793 size_t len
= min(num_downstream
- i
, 3) * DRM_HDCP_KSV_LEN
;
5794 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
,
5795 DP_AUX_HDCP_KSV_FIFO
,
5796 ksv_fifo
+ i
* DRM_HDCP_KSV_LEN
,
5799 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5801 return ret
>= 0 ? -EIO
: ret
;
5808 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port
*intel_dig_port
,
5813 if (i
>= DRM_HDCP_V_PRIME_NUM_PARTS
)
5816 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
,
5817 DP_AUX_HDCP_V_PRIME(i
), part
,
5818 DRM_HDCP_V_PRIME_PART_LEN
);
5819 if (ret
!= DRM_HDCP_V_PRIME_PART_LEN
) {
5820 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i
, ret
);
5821 return ret
>= 0 ? -EIO
: ret
;
5827 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port
*intel_dig_port
,
5830 /* Not used for single stream DisplayPort setups */
5835 bool intel_dp_hdcp_check_link(struct intel_digital_port
*intel_dig_port
)
5840 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, DP_AUX_HDCP_BSTATUS
,
5843 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret
);
5847 return !(bstatus
& (DP_BSTATUS_LINK_FAILURE
| DP_BSTATUS_REAUTH_REQ
));
5851 int intel_dp_hdcp_capable(struct intel_digital_port
*intel_dig_port
,
5857 ret
= intel_dp_hdcp_read_bcaps(intel_dig_port
, &bcaps
);
5861 *hdcp_capable
= bcaps
& DP_BCAPS_HDCP_CAPABLE
;
5865 struct hdcp2_dp_errata_stream_type
{
5870 static struct hdcp2_dp_msg_data
{
5873 bool msg_detectable
;
5875 u32 timeout2
; /* Added for non_paired situation */
5876 } hdcp2_msg_data
[] = {
5877 {HDCP_2_2_AKE_INIT
, DP_HDCP_2_2_AKE_INIT_OFFSET
, false, 0, 0},
5878 {HDCP_2_2_AKE_SEND_CERT
, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET
,
5879 false, HDCP_2_2_CERT_TIMEOUT_MS
, 0},
5880 {HDCP_2_2_AKE_NO_STORED_KM
, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET
,
5882 {HDCP_2_2_AKE_STORED_KM
, DP_HDCP_2_2_AKE_STORED_KM_OFFSET
,
5884 {HDCP_2_2_AKE_SEND_HPRIME
, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET
,
5885 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS
,
5886 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS
},
5887 {HDCP_2_2_AKE_SEND_PAIRING_INFO
,
5888 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET
, true,
5889 HDCP_2_2_PAIRING_TIMEOUT_MS
, 0},
5890 {HDCP_2_2_LC_INIT
, DP_HDCP_2_2_LC_INIT_OFFSET
, false, 0, 0},
5891 {HDCP_2_2_LC_SEND_LPRIME
, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET
,
5892 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS
, 0},
5893 {HDCP_2_2_SKE_SEND_EKS
, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET
, false,
5895 {HDCP_2_2_REP_SEND_RECVID_LIST
,
5896 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET
, true,
5897 HDCP_2_2_RECVID_LIST_TIMEOUT_MS
, 0},
5898 {HDCP_2_2_REP_SEND_ACK
, DP_HDCP_2_2_REP_SEND_ACK_OFFSET
, false,
5900 {HDCP_2_2_REP_STREAM_MANAGE
,
5901 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET
, false,
5903 {HDCP_2_2_REP_STREAM_READY
, DP_HDCP_2_2_REP_STREAM_READY_OFFSET
,
5904 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS
, 0},
5905 /* local define to shovel this through the write_2_2 interface */
5906 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5907 {HDCP_2_2_ERRATA_DP_STREAM_TYPE
,
5908 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET
, false,
5913 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port
*intel_dig_port
,
5918 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
,
5919 DP_HDCP_2_2_REG_RXSTATUS_OFFSET
, rx_status
,
5920 HDCP_2_2_DP_RXSTATUS_LEN
);
5921 if (ret
!= HDCP_2_2_DP_RXSTATUS_LEN
) {
5922 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret
);
5923 return ret
>= 0 ? -EIO
: ret
;
5930 int hdcp2_detect_msg_availability(struct intel_digital_port
*intel_dig_port
,
5931 u8 msg_id
, bool *msg_ready
)
5937 ret
= intel_dp_hdcp2_read_rx_status(intel_dig_port
, &rx_status
);
5942 case HDCP_2_2_AKE_SEND_HPRIME
:
5943 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status
))
5946 case HDCP_2_2_AKE_SEND_PAIRING_INFO
:
5947 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status
))
5950 case HDCP_2_2_REP_SEND_RECVID_LIST
:
5951 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status
))
5955 DRM_ERROR("Unidentified msg_id: %d\n", msg_id
);
5963 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port
*intel_dig_port
,
5964 struct hdcp2_dp_msg_data
*hdcp2_msg_data
)
5966 struct intel_dp
*dp
= &intel_dig_port
->dp
;
5967 struct intel_hdcp
*hdcp
= &dp
->attached_connector
->hdcp
;
5968 u8 msg_id
= hdcp2_msg_data
->msg_id
;
5970 bool msg_ready
= false;
5972 if (msg_id
== HDCP_2_2_AKE_SEND_HPRIME
&& !hdcp
->is_paired
)
5973 timeout
= hdcp2_msg_data
->timeout2
;
5975 timeout
= hdcp2_msg_data
->timeout
;
5978 * There is no way to detect the CERT, LPRIME and STREAM_READY
5979 * availability. So Wait for timeout and read the msg.
5981 if (!hdcp2_msg_data
->msg_detectable
) {
5986 * As we want to check the msg availability at timeout, Ignoring
5987 * the timeout at wait for CP_IRQ.
5989 intel_dp_hdcp_wait_for_cp_irq(hdcp
, timeout
);
5990 ret
= hdcp2_detect_msg_availability(intel_dig_port
,
5991 msg_id
, &msg_ready
);
5997 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
5998 hdcp2_msg_data
->msg_id
, ret
, timeout
);
6003 static struct hdcp2_dp_msg_data
*get_hdcp2_dp_msg_data(u8 msg_id
)
6007 for (i
= 0; i
< ARRAY_SIZE(hdcp2_msg_data
); i
++)
6008 if (hdcp2_msg_data
[i
].msg_id
== msg_id
)
6009 return &hdcp2_msg_data
[i
];
6015 int intel_dp_hdcp2_write_msg(struct intel_digital_port
*intel_dig_port
,
6016 void *buf
, size_t size
)
6018 struct intel_dp
*dp
= &intel_dig_port
->dp
;
6019 struct intel_hdcp
*hdcp
= &dp
->attached_connector
->hdcp
;
6020 unsigned int offset
;
6022 ssize_t ret
, bytes_to_write
, len
;
6023 struct hdcp2_dp_msg_data
*hdcp2_msg_data
;
6025 hdcp2_msg_data
= get_hdcp2_dp_msg_data(*byte
);
6026 if (!hdcp2_msg_data
)
6029 offset
= hdcp2_msg_data
->offset
;
6031 /* No msg_id in DP HDCP2.2 msgs */
6032 bytes_to_write
= size
- 1;
6035 hdcp
->cp_irq_count_cached
= atomic_read(&hdcp
->cp_irq_count
);
6037 while (bytes_to_write
) {
6038 len
= bytes_to_write
> DP_AUX_MAX_PAYLOAD_BYTES
?
6039 DP_AUX_MAX_PAYLOAD_BYTES
: bytes_to_write
;
6041 ret
= drm_dp_dpcd_write(&intel_dig_port
->dp
.aux
,
6042 offset
, (void *)byte
, len
);
6046 bytes_to_write
-= ret
;
6055 ssize_t
get_receiver_id_list_size(struct intel_digital_port
*intel_dig_port
)
6057 u8 rx_info
[HDCP_2_2_RXINFO_LEN
];
6061 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
,
6062 DP_HDCP_2_2_REG_RXINFO_OFFSET
,
6063 (void *)rx_info
, HDCP_2_2_RXINFO_LEN
);
6064 if (ret
!= HDCP_2_2_RXINFO_LEN
)
6065 return ret
>= 0 ? -EIO
: ret
;
6067 dev_cnt
= (HDCP_2_2_DEV_COUNT_HI(rx_info
[0]) << 4 |
6068 HDCP_2_2_DEV_COUNT_LO(rx_info
[1]));
6070 if (dev_cnt
> HDCP_2_2_MAX_DEVICE_COUNT
)
6071 dev_cnt
= HDCP_2_2_MAX_DEVICE_COUNT
;
6073 ret
= sizeof(struct hdcp2_rep_send_receiverid_list
) -
6074 HDCP_2_2_RECEIVER_IDS_MAX_LEN
+
6075 (dev_cnt
* HDCP_2_2_RECEIVER_ID_LEN
);
6081 int intel_dp_hdcp2_read_msg(struct intel_digital_port
*intel_dig_port
,
6082 u8 msg_id
, void *buf
, size_t size
)
6084 unsigned int offset
;
6086 ssize_t ret
, bytes_to_recv
, len
;
6087 struct hdcp2_dp_msg_data
*hdcp2_msg_data
;
6089 hdcp2_msg_data
= get_hdcp2_dp_msg_data(msg_id
);
6090 if (!hdcp2_msg_data
)
6092 offset
= hdcp2_msg_data
->offset
;
6094 ret
= intel_dp_hdcp2_wait_for_msg(intel_dig_port
, hdcp2_msg_data
);
6098 if (msg_id
== HDCP_2_2_REP_SEND_RECVID_LIST
) {
6099 ret
= get_receiver_id_list_size(intel_dig_port
);
6105 bytes_to_recv
= size
- 1;
6107 /* DP adaptation msgs has no msg_id */
6110 while (bytes_to_recv
) {
6111 len
= bytes_to_recv
> DP_AUX_MAX_PAYLOAD_BYTES
?
6112 DP_AUX_MAX_PAYLOAD_BYTES
: bytes_to_recv
;
6114 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
, offset
,
6117 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id
, ret
);
6121 bytes_to_recv
-= ret
;
6132 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port
*intel_dig_port
,
6133 bool is_repeater
, u8 content_type
)
6135 struct hdcp2_dp_errata_stream_type stream_type_msg
;
6141 * Errata for DP: As Stream type is used for encryption, Receiver
6142 * should be communicated with stream type for the decryption of the
6144 * Repeater will be communicated with stream type as a part of it's
6145 * auth later in time.
6147 stream_type_msg
.msg_id
= HDCP_2_2_ERRATA_DP_STREAM_TYPE
;
6148 stream_type_msg
.stream_type
= content_type
;
6150 return intel_dp_hdcp2_write_msg(intel_dig_port
, &stream_type_msg
,
6151 sizeof(stream_type_msg
));
6155 int intel_dp_hdcp2_check_link(struct intel_digital_port
*intel_dig_port
)
6160 ret
= intel_dp_hdcp2_read_rx_status(intel_dig_port
, &rx_status
);
6164 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status
))
6165 ret
= HDCP_REAUTH_REQUEST
;
6166 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status
))
6167 ret
= HDCP_LINK_INTEGRITY_FAILURE
;
6168 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status
))
6169 ret
= HDCP_TOPOLOGY_CHANGE
;
6175 int intel_dp_hdcp2_capable(struct intel_digital_port
*intel_dig_port
,
6182 ret
= drm_dp_dpcd_read(&intel_dig_port
->dp
.aux
,
6183 DP_HDCP_2_2_REG_RX_CAPS_OFFSET
,
6184 rx_caps
, HDCP_2_2_RXCAPS_LEN
);
6185 if (ret
!= HDCP_2_2_RXCAPS_LEN
)
6186 return ret
>= 0 ? -EIO
: ret
;
6188 if (rx_caps
[0] == HDCP_2_2_RX_CAPS_VERSION_VAL
&&
6189 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps
[2]))
6195 static const struct intel_hdcp_shim intel_dp_hdcp_shim
= {
6196 .write_an_aksv
= intel_dp_hdcp_write_an_aksv
,
6197 .read_bksv
= intel_dp_hdcp_read_bksv
,
6198 .read_bstatus
= intel_dp_hdcp_read_bstatus
,
6199 .repeater_present
= intel_dp_hdcp_repeater_present
,
6200 .read_ri_prime
= intel_dp_hdcp_read_ri_prime
,
6201 .read_ksv_ready
= intel_dp_hdcp_read_ksv_ready
,
6202 .read_ksv_fifo
= intel_dp_hdcp_read_ksv_fifo
,
6203 .read_v_prime_part
= intel_dp_hdcp_read_v_prime_part
,
6204 .toggle_signalling
= intel_dp_hdcp_toggle_signalling
,
6205 .check_link
= intel_dp_hdcp_check_link
,
6206 .hdcp_capable
= intel_dp_hdcp_capable
,
6207 .write_2_2_msg
= intel_dp_hdcp2_write_msg
,
6208 .read_2_2_msg
= intel_dp_hdcp2_read_msg
,
6209 .config_stream_type
= intel_dp_hdcp2_config_stream_type
,
6210 .check_2_2_link
= intel_dp_hdcp2_check_link
,
6211 .hdcp_2_2_capable
= intel_dp_hdcp2_capable
,
6212 .protocol
= HDCP_PROTOCOL_DP
,
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	intel_dp->reset_link_params = true;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum irqreturn ret = IRQ_NONE;
	intel_wakeref_t wakeref;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(intel_dig_port));

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			goto put_power;
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(intel_dig_port),
				wakeref);

	return ret;
}
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. So bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}
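/*
 * Editorial note (not in the original source): the "max bpc" property is
 * bounded to what the platform can actually output, e.g. 6..10 bpc on GMCH
 * platforms and 6..12 bpc on gen5+, so a compositor requesting a deeper max
 * bpc is clamped by the property range rather than failing the commit. The
 * scaling-mode property is likewise restricted: centered scaling is only
 * offered where the (non-GMCH) hardware can do it.
 */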
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ironlake_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_ctl);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = I915_READ(regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;

	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
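/*
 * Worked example of the unit juggling above (editorial illustration with an
 * assumed VBT value, not from the original source): all delays are kept in
 * 100 us units internally. A VBT T11_T12 of 500 ms arrives as 5000, the
 * zero-based hardware encoding adds 100 ms (100 * 10 = 1000) giving 6000,
 * get_delay() turns that into panel_power_cycle_delay =
 * DIV_ROUND_UP(6000, 10) = 600 ms, and the stored value stays at
 * roundup(6000, 1000) = 6000 because the hardware only has 100 ms
 * granularity for this field.
 */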
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		I915_WRITE(regs.pp_div,
			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = I915_READ(regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		I915_WRITE(regs.pp_ctrl, pp_ctl);
	}

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      i915_mmio_reg_valid(regs.pp_div) ?
		      I915_READ(regs.pp_div) :
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
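/*
 * Worked example for the divisor programming above (editorial illustration,
 * assuming a 24 MHz raw clock; not from the original source): with
 * rawclk_freq = 24000 kHz, div = 24 and the reference divider field is
 * (100 * 24) / 2 - 1 = 1199, matching the Bspec formula. The power cycle
 * delay field is written in 100 ms units, so a t11_t12 of 6000 (100 us
 * units) is stored as DIV_ROUND_UP(6000, 1000) = 6.
 */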
static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->base.active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
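/*
 * Editorial note (not in the original source): two mechanisms are used above.
 * On gen8+ (except CHV) the refresh rate is switched by selecting between the
 * M1/N1 and M2/N2 link timing sets, while on gen7 and VLV/CHV the PIPECONF
 * EDP RR mode switch bit is toggled instead. For illustration, with a 60 Hz
 * fixed mode and a 40 Hz downclock mode, a request for 40 Hz matches the
 * downclock vrefresh and selects DRRS_LOW_RR.
 */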
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp) {
		DRM_DEBUG_KMS("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
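/*
 * Editorial note (not in the original source): invalidate and flush are
 * deliberately asymmetric with respect to busy_frontbuffer_bits. Invalidate
 * ORs the bits in (rendering has started), flush clears them (rendering has
 * finished); both upclock immediately if the panel is currently at the low
 * refresh rate, but only flush re-arms the one second downclock timer, and
 * only once no busy bits remain for the pipe.
 */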
/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation. When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
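/*
 * Editorial sketch (not part of the original DOC text): from the frontbuffer
 * tracking side the expected calling pattern is roughly
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... CPU/GTT/flip activity touching the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * so the panel runs at the high refresh rate while the screen is busy and
 * drops to the low refresh rate one second after the last flush, assuming no
 * other planes on the pipe are still dirty.
 */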
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
						    &connector->base);
	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
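/*
 * Editorial note (not in the original source): the downclock mode returned
 * here is handed to intel_panel_init() by the eDP connector setup below, so
 * both the fixed (high RR) and downclock (low RR) modes live in the panel
 * state. If no downclock mode is found, or the VBT does not advertise
 * seamless DRRS, the feature simply stays off and only the fixed mode is
 * used.
 */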
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector,
							   edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
						dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/*
	 * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,