2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll
[] = {
57 { .p1
= 2, .p2
= 10, .n
= 2, .m1
= 23, .m2
= 8 } },
59 { .p1
= 1, .p2
= 10, .n
= 1, .m1
= 14, .m2
= 2 } }
62 static const struct dp_link_dpll pch_dpll
[] = {
64 { .p1
= 2, .p2
= 10, .n
= 1, .m1
= 12, .m2
= 9 } },
66 { .p1
= 1, .p2
= 10, .n
= 2, .m1
= 14, .m2
= 8 } }
69 static const struct dp_link_dpll vlv_dpll
[] = {
71 { .p1
= 3, .p2
= 2, .n
= 5, .m1
= 3, .m2
= 81 } },
73 { .p1
= 2, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll
[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1
= 4, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1
= 4, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1
= 2, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } }
/* Per-platform source link rates in kHz, sorted ascending; 540000 (HBR2)
 * must be last — intel_dp_source_rates() relies on that to drop it. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp
*intel_dp
)
109 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
111 return intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
;
114 static struct drm_device
*intel_dp_to_dev(struct intel_dp
*intel_dp
)
116 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
118 return intel_dig_port
->base
.base
.dev
;
121 static struct intel_dp
*intel_attached_dp(struct drm_connector
*connector
)
123 return enc_to_intel_dp(&intel_attached_encoder(connector
)->base
);
126 static void intel_dp_link_down(struct intel_dp
*intel_dp
);
127 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
);
128 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
);
129 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
);
130 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
134 intel_dp_max_link_bw(struct intel_dp
*intel_dp
)
136 int max_link_bw
= intel_dp
->dpcd
[DP_MAX_LINK_RATE
];
138 switch (max_link_bw
) {
139 case DP_LINK_BW_1_62
:
144 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 max_link_bw
= DP_LINK_BW_1_62
;
152 static u8
intel_dp_max_lane_count(struct intel_dp
*intel_dp
)
154 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
155 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
156 u8 source_max
, sink_max
;
159 if (HAS_DDI(dev
) && intel_dig_port
->port
== PORT_A
&&
160 (intel_dig_port
->saved_port_bits
& DDI_A_4_LANES
) == 0)
163 sink_max
= drm_dp_max_lane_count(intel_dp
->dpcd
);
165 return min(source_max
, sink_max
);
/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* Round up to the next decakilobit (+9 before /10). */
	return (pixel_clock * bpp + 9) / 10;
}
/* Link payload capacity in decakilobits/s: 8b/10b encoding keeps 8 of
 * every 10 bits, hence the * 8 / 10. */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
197 static enum drm_mode_status
198 intel_dp_mode_valid(struct drm_connector
*connector
,
199 struct drm_display_mode
*mode
)
201 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
202 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
203 struct drm_display_mode
*fixed_mode
= intel_connector
->panel
.fixed_mode
;
204 int target_clock
= mode
->clock
;
205 int max_rate
, mode_rate
, max_lanes
, max_link_clock
;
207 if (is_edp(intel_dp
) && fixed_mode
) {
208 if (mode
->hdisplay
> fixed_mode
->hdisplay
)
211 if (mode
->vdisplay
> fixed_mode
->vdisplay
)
214 target_clock
= fixed_mode
->clock
;
217 max_link_clock
= intel_dp_max_link_rate(intel_dp
);
218 max_lanes
= intel_dp_max_lane_count(intel_dp
);
220 max_rate
= intel_dp_max_data_rate(max_link_clock
, max_lanes
);
221 mode_rate
= intel_dp_link_required(target_clock
, 18);
223 if (mode_rate
> max_rate
)
224 return MODE_CLOCK_HIGH
;
226 if (mode
->clock
< 10000)
227 return MODE_CLOCK_LOW
;
229 if (mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
230 return MODE_H_ILLEGAL
;
235 uint32_t intel_dp_pack_aux(const uint8_t *src
, int src_bytes
)
242 for (i
= 0; i
< src_bytes
; i
++)
243 v
|= ((uint32_t) src
[i
]) << ((3-i
) * 8);
247 static void intel_dp_unpack_aux(uint32_t src
, uint8_t *dst
, int dst_bytes
)
252 for (i
= 0; i
< dst_bytes
; i
++)
253 dst
[i
] = src
>> ((3-i
) * 8);
256 /* hrawclock is 1/4 the FSB frequency */
258 intel_hrawclk(struct drm_device
*dev
)
260 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
263 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
264 if (IS_VALLEYVIEW(dev
))
267 clkcfg
= I915_READ(CLKCFG
);
268 switch (clkcfg
& CLKCFG_FSB_MASK
) {
277 case CLKCFG_FSB_1067
:
279 case CLKCFG_FSB_1333
:
281 /* these two are just a guess; one of them might be right */
282 case CLKCFG_FSB_1600
:
283 case CLKCFG_FSB_1600_ALT
:
/* Forward declarations for the panel power sequencer init helpers. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
297 static void pps_lock(struct intel_dp
*intel_dp
)
299 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
300 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
301 struct drm_device
*dev
= encoder
->base
.dev
;
302 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
303 enum intel_display_power_domain power_domain
;
306 * See vlv_power_sequencer_reset() why we need
307 * a power domain reference here.
309 power_domain
= intel_display_port_power_domain(encoder
);
310 intel_display_power_get(dev_priv
, power_domain
);
312 mutex_lock(&dev_priv
->pps_mutex
);
315 static void pps_unlock(struct intel_dp
*intel_dp
)
317 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
318 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
319 struct drm_device
*dev
= encoder
->base
.dev
;
320 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
321 enum intel_display_power_domain power_domain
;
323 mutex_unlock(&dev_priv
->pps_mutex
);
325 power_domain
= intel_display_port_power_domain(encoder
);
326 intel_display_power_put(dev_priv
, power_domain
);
330 vlv_power_sequencer_kick(struct intel_dp
*intel_dp
)
332 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
333 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
334 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
335 enum pipe pipe
= intel_dp
->pps_pipe
;
339 if (WARN(I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
,
340 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
341 pipe_name(pipe
), port_name(intel_dig_port
->port
)))
344 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
345 pipe_name(pipe
), port_name(intel_dig_port
->port
));
347 /* Preserve the BIOS-computed detected bit. This is
348 * supposed to be read-only.
350 DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
351 DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
352 DP
|= DP_PORT_WIDTH(1);
353 DP
|= DP_LINK_TRAIN_PAT_1
;
355 if (IS_CHERRYVIEW(dev
))
356 DP
|= DP_PIPE_SELECT_CHV(pipe
);
357 else if (pipe
== PIPE_B
)
358 DP
|= DP_PIPEB_SELECT
;
360 pll_enabled
= I915_READ(DPLL(pipe
)) & DPLL_VCO_ENABLE
;
363 * The DPLL for the pipe must be enabled for this to work.
364 * So enable temporarily it if it's not already enabled.
367 vlv_force_pll_on(dev
, pipe
, IS_CHERRYVIEW(dev
) ?
368 &chv_dpll
[0].dpll
: &vlv_dpll
[0].dpll
);
371 * Similar magic as in intel_dp_enable_port().
372 * We _must_ do this port enable + disable trick
373 * to make this power seqeuencer lock onto the port.
374 * Otherwise even VDD force bit won't work.
376 I915_WRITE(intel_dp
->output_reg
, DP
);
377 POSTING_READ(intel_dp
->output_reg
);
379 I915_WRITE(intel_dp
->output_reg
, DP
| DP_PORT_EN
);
380 POSTING_READ(intel_dp
->output_reg
);
382 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
383 POSTING_READ(intel_dp
->output_reg
);
386 vlv_force_pll_off(dev
, pipe
);
390 vlv_power_sequencer_pipe(struct intel_dp
*intel_dp
)
392 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
393 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
394 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
395 struct intel_encoder
*encoder
;
396 unsigned int pipes
= (1 << PIPE_A
) | (1 << PIPE_B
);
399 lockdep_assert_held(&dev_priv
->pps_mutex
);
401 /* We should never land here with regular DP ports */
402 WARN_ON(!is_edp(intel_dp
));
404 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
405 return intel_dp
->pps_pipe
;
408 * We don't have power sequencer currently.
409 * Pick one that's not used by other ports.
411 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
413 struct intel_dp
*tmp
;
415 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
418 tmp
= enc_to_intel_dp(&encoder
->base
);
420 if (tmp
->pps_pipe
!= INVALID_PIPE
)
421 pipes
&= ~(1 << tmp
->pps_pipe
);
425 * Didn't find one. This should not happen since there
426 * are two power sequencers and up to two eDP ports.
428 if (WARN_ON(pipes
== 0))
431 pipe
= ffs(pipes
) - 1;
433 vlv_steal_power_sequencer(dev
, pipe
);
434 intel_dp
->pps_pipe
= pipe
;
436 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
437 pipe_name(intel_dp
->pps_pipe
),
438 port_name(intel_dig_port
->port
));
440 /* init power sequencer on this pipe and port */
441 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
442 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
445 * Even vdd force doesn't work until we've made
446 * the power sequencer lock in on the port.
448 vlv_power_sequencer_kick(intel_dp
);
450 return intel_dp
->pps_pipe
;
453 typedef bool (*vlv_pipe_check
)(struct drm_i915_private
*dev_priv
,
456 static bool vlv_pipe_has_pp_on(struct drm_i915_private
*dev_priv
,
459 return I915_READ(VLV_PIPE_PP_STATUS(pipe
)) & PP_ON
;
462 static bool vlv_pipe_has_vdd_on(struct drm_i915_private
*dev_priv
,
465 return I915_READ(VLV_PIPE_PP_CONTROL(pipe
)) & EDP_FORCE_VDD
;
468 static bool vlv_pipe_any(struct drm_i915_private
*dev_priv
,
475 vlv_initial_pps_pipe(struct drm_i915_private
*dev_priv
,
477 vlv_pipe_check pipe_check
)
481 for (pipe
= PIPE_A
; pipe
<= PIPE_B
; pipe
++) {
482 u32 port_sel
= I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe
)) &
483 PANEL_PORT_SELECT_MASK
;
485 if (port_sel
!= PANEL_PORT_SELECT_VLV(port
))
488 if (!pipe_check(dev_priv
, pipe
))
498 vlv_initial_power_sequencer_setup(struct intel_dp
*intel_dp
)
500 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
501 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
502 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
503 enum port port
= intel_dig_port
->port
;
505 lockdep_assert_held(&dev_priv
->pps_mutex
);
507 /* try to find a pipe with this port selected */
508 /* first pick one where the panel is on */
509 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
511 /* didn't find one? pick one where vdd is on */
512 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
513 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
514 vlv_pipe_has_vdd_on
);
515 /* didn't find one? pick one with just the correct port */
516 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
517 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
520 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
521 if (intel_dp
->pps_pipe
== INVALID_PIPE
) {
522 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
527 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
528 port_name(port
), pipe_name(intel_dp
->pps_pipe
));
530 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
531 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
534 void vlv_power_sequencer_reset(struct drm_i915_private
*dev_priv
)
536 struct drm_device
*dev
= dev_priv
->dev
;
537 struct intel_encoder
*encoder
;
539 if (WARN_ON(!IS_VALLEYVIEW(dev
)))
543 * We can't grab pps_mutex here due to deadlock with power_domain
544 * mutex when power_domain functions are called while holding pps_mutex.
545 * That also means that in order to use pps_pipe the code needs to
546 * hold both a power domain reference and pps_mutex, and the power domain
547 * reference get/put must be done while _not_ holding pps_mutex.
548 * pps_{lock,unlock}() do these steps in the correct order, so one
549 * should use them always.
552 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, base
.head
) {
553 struct intel_dp
*intel_dp
;
555 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
558 intel_dp
= enc_to_intel_dp(&encoder
->base
);
559 intel_dp
->pps_pipe
= INVALID_PIPE
;
563 static u32
_pp_ctrl_reg(struct intel_dp
*intel_dp
)
565 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
568 return BXT_PP_CONTROL(0);
569 else if (HAS_PCH_SPLIT(dev
))
570 return PCH_PP_CONTROL
;
572 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp
));
575 static u32
_pp_stat_reg(struct intel_dp
*intel_dp
)
577 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
580 return BXT_PP_STATUS(0);
581 else if (HAS_PCH_SPLIT(dev
))
582 return PCH_PP_STATUS
;
584 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp
));
587 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
588 This function only applicable when panel PM state is not to be tracked */
589 static int edp_notify_handler(struct notifier_block
*this, unsigned long code
,
592 struct intel_dp
*intel_dp
= container_of(this, typeof(* intel_dp
),
594 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
595 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
597 u32 pp_ctrl_reg
, pp_div_reg
;
599 if (!is_edp(intel_dp
) || code
!= SYS_RESTART
)
604 if (IS_VALLEYVIEW(dev
)) {
605 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
607 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
608 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
609 pp_div
= I915_READ(pp_div_reg
);
610 pp_div
&= PP_REFERENCE_DIVIDER_MASK
;
612 /* 0x1F write to PP_DIV_REG sets max cycle delay */
613 I915_WRITE(pp_div_reg
, pp_div
| 0x1F);
614 I915_WRITE(pp_ctrl_reg
, PANEL_UNLOCK_REGS
| PANEL_POWER_OFF
);
615 msleep(intel_dp
->panel_power_cycle_delay
);
618 pps_unlock(intel_dp
);
623 static bool edp_have_panel_power(struct intel_dp
*intel_dp
)
625 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
626 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
628 lockdep_assert_held(&dev_priv
->pps_mutex
);
630 if (IS_VALLEYVIEW(dev
) &&
631 intel_dp
->pps_pipe
== INVALID_PIPE
)
634 return (I915_READ(_pp_stat_reg(intel_dp
)) & PP_ON
) != 0;
637 static bool edp_have_panel_vdd(struct intel_dp
*intel_dp
)
639 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
640 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
642 lockdep_assert_held(&dev_priv
->pps_mutex
);
644 if (IS_VALLEYVIEW(dev
) &&
645 intel_dp
->pps_pipe
== INVALID_PIPE
)
648 return I915_READ(_pp_ctrl_reg(intel_dp
)) & EDP_FORCE_VDD
;
652 intel_dp_check_edp(struct intel_dp
*intel_dp
)
654 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
655 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
657 if (!is_edp(intel_dp
))
660 if (!edp_have_panel_power(intel_dp
) && !edp_have_panel_vdd(intel_dp
)) {
661 WARN(1, "eDP powered off while attempting aux channel communication.\n");
662 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
663 I915_READ(_pp_stat_reg(intel_dp
)),
664 I915_READ(_pp_ctrl_reg(intel_dp
)));
669 intel_dp_aux_wait_done(struct intel_dp
*intel_dp
, bool has_aux_irq
)
671 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
672 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
673 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
674 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
678 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
680 done
= wait_event_timeout(dev_priv
->gmbus_wait_queue
, C
,
681 msecs_to_jiffies_timeout(10));
683 done
= wait_for_atomic(C
, 10) == 0;
685 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
692 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
694 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
695 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
698 * The clock divider is based off the hrawclk, and would like to run at
699 * 2MHz. So, take the hrawclk value and divide by 2 and use that
701 return index
? 0 : intel_hrawclk(dev
) / 2;
704 static uint32_t ilk_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
706 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
707 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
708 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
713 if (intel_dig_port
->port
== PORT_A
) {
714 return DIV_ROUND_UP(dev_priv
->cdclk_freq
, 2000);
717 return DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
721 static uint32_t hsw_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
723 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
724 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
725 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
727 if (intel_dig_port
->port
== PORT_A
) {
730 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
731 } else if (dev_priv
->pch_id
== INTEL_PCH_LPT_DEVICE_ID_TYPE
) {
732 /* Workaround for non-ULT HSW */
739 return index
? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
743 static uint32_t vlv_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
745 return index
? 0 : 100;
748 static uint32_t skl_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
751 * SKL doesn't need us to program the AUX clock divider (Hardware will
752 * derive the clock from CDCLK automatically). We still implement the
753 * get_aux_clock_divider vfunc to plug-in into the existing code.
755 return index
? 0 : 1;
758 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp
*intel_dp
,
761 uint32_t aux_clock_divider
)
763 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
764 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
765 uint32_t precharge
, timeout
;
772 if (IS_BROADWELL(dev
) && intel_dp
->aux_ch_ctl_reg
== DPA_AUX_CH_CTL
)
773 timeout
= DP_AUX_CH_CTL_TIME_OUT_600us
;
775 timeout
= DP_AUX_CH_CTL_TIME_OUT_400us
;
777 return DP_AUX_CH_CTL_SEND_BUSY
|
779 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
780 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
782 DP_AUX_CH_CTL_RECEIVE_ERROR
|
783 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
784 (precharge
<< DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT
) |
785 (aux_clock_divider
<< DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT
);
788 static uint32_t skl_get_aux_send_ctl(struct intel_dp
*intel_dp
,
793 return DP_AUX_CH_CTL_SEND_BUSY
|
795 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
796 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
797 DP_AUX_CH_CTL_TIME_OUT_1600us
|
798 DP_AUX_CH_CTL_RECEIVE_ERROR
|
799 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
800 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
804 intel_dp_aux_ch(struct intel_dp
*intel_dp
,
805 const uint8_t *send
, int send_bytes
,
806 uint8_t *recv
, int recv_size
)
808 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
809 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
810 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
811 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
812 uint32_t ch_data
= ch_ctl
+ 4;
813 uint32_t aux_clock_divider
;
814 int i
, ret
, recv_bytes
;
817 bool has_aux_irq
= HAS_AUX_IRQ(dev
);
823 * We will be called with VDD already enabled for dpcd/edid/oui reads.
824 * In such cases we want to leave VDD enabled and it's up to upper layers
825 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
828 vdd
= edp_panel_vdd_on(intel_dp
);
830 /* dp aux is extremely sensitive to irq latency, hence request the
831 * lowest possible wakeup latency and so prevent the cpu from going into
834 pm_qos_update_request(&dev_priv
->pm_qos
, 0);
836 intel_dp_check_edp(intel_dp
);
838 intel_aux_display_runtime_get(dev_priv
);
840 /* Try to wait for any previous AUX channel activity */
841 for (try = 0; try < 3; try++) {
842 status
= I915_READ_NOTRACE(ch_ctl
);
843 if ((status
& DP_AUX_CH_CTL_SEND_BUSY
) == 0)
849 static u32 last_status
= -1;
850 const u32 status
= I915_READ(ch_ctl
);
852 if (status
!= last_status
) {
853 WARN(1, "dp_aux_ch not started status 0x%08x\n",
855 last_status
= status
;
862 /* Only 5 data registers! */
863 if (WARN_ON(send_bytes
> 20 || recv_size
> 20)) {
868 while ((aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, clock
++))) {
869 u32 send_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
,
874 /* Must try at least 3 times according to DP spec */
875 for (try = 0; try < 5; try++) {
876 /* Load the send data into the aux channel data registers */
877 for (i
= 0; i
< send_bytes
; i
+= 4)
878 I915_WRITE(ch_data
+ i
,
879 intel_dp_pack_aux(send
+ i
,
882 /* Send the command and wait for it to complete */
883 I915_WRITE(ch_ctl
, send_ctl
);
885 status
= intel_dp_aux_wait_done(intel_dp
, has_aux_irq
);
887 /* Clear done status and any errors */
891 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
892 DP_AUX_CH_CTL_RECEIVE_ERROR
);
894 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
)
897 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
898 * 400us delay required for errors and timeouts
899 * Timeout errors from the HW already meet this
900 * requirement so skip to next iteration
902 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
903 usleep_range(400, 500);
906 if (status
& DP_AUX_CH_CTL_DONE
)
911 if ((status
& DP_AUX_CH_CTL_DONE
) == 0) {
912 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status
);
918 /* Check for timeout or receive error.
919 * Timeouts occur when the sink is not connected
921 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
922 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status
);
927 /* Timeouts occur when the device isn't connected, so they're
928 * "normal" -- don't fill the kernel log with these */
929 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
) {
930 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status
);
935 /* Unload any bytes sent back from the other side */
936 recv_bytes
= ((status
& DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
) >>
937 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
);
938 if (recv_bytes
> recv_size
)
939 recv_bytes
= recv_size
;
941 for (i
= 0; i
< recv_bytes
; i
+= 4)
942 intel_dp_unpack_aux(I915_READ(ch_data
+ i
),
943 recv
+ i
, recv_bytes
- i
);
947 pm_qos_update_request(&dev_priv
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
948 intel_aux_display_runtime_put(dev_priv
);
951 edp_panel_vdd_off(intel_dp
, false);
953 pps_unlock(intel_dp
);
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
961 intel_dp_aux_transfer(struct drm_dp_aux
*aux
, struct drm_dp_aux_msg
*msg
)
963 struct intel_dp
*intel_dp
= container_of(aux
, struct intel_dp
, aux
);
964 uint8_t txbuf
[20], rxbuf
[20];
965 size_t txsize
, rxsize
;
968 txbuf
[0] = (msg
->request
<< 4) |
969 ((msg
->address
>> 16) & 0xf);
970 txbuf
[1] = (msg
->address
>> 8) & 0xff;
971 txbuf
[2] = msg
->address
& 0xff;
972 txbuf
[3] = msg
->size
- 1;
974 switch (msg
->request
& ~DP_AUX_I2C_MOT
) {
975 case DP_AUX_NATIVE_WRITE
:
976 case DP_AUX_I2C_WRITE
:
977 txsize
= msg
->size
? HEADER_SIZE
+ msg
->size
: BARE_ADDRESS_SIZE
;
978 rxsize
= 2; /* 0 or 1 data bytes */
980 if (WARN_ON(txsize
> 20))
983 memcpy(txbuf
+ HEADER_SIZE
, msg
->buffer
, msg
->size
);
985 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
987 msg
->reply
= rxbuf
[0] >> 4;
990 /* Number of bytes written in a short write. */
991 ret
= clamp_t(int, rxbuf
[1], 0, msg
->size
);
993 /* Return payload size. */
999 case DP_AUX_NATIVE_READ
:
1000 case DP_AUX_I2C_READ
:
1001 txsize
= msg
->size
? HEADER_SIZE
: BARE_ADDRESS_SIZE
;
1002 rxsize
= msg
->size
+ 1;
1004 if (WARN_ON(rxsize
> 20))
1007 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
1009 msg
->reply
= rxbuf
[0] >> 4;
1011 * Assume happy day, and copy the data. The caller is
1012 * expected to check msg->reply before touching it.
1014 * Return payload size.
1017 memcpy(msg
->buffer
, rxbuf
+ 1, ret
);
1030 intel_dp_aux_init(struct intel_dp
*intel_dp
, struct intel_connector
*connector
)
1032 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1033 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1034 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1035 enum port port
= intel_dig_port
->port
;
1036 struct ddi_vbt_port_info
*info
= &dev_priv
->vbt
.ddi_port_info
[port
];
1037 const char *name
= NULL
;
1038 uint32_t porte_aux_ctl_reg
= DPA_AUX_CH_CTL
;
1041 /* On SKL we don't have Aux for port E so we rely on VBT to set
1042 * a proper alternate aux channel.
1044 if (IS_SKYLAKE(dev
) && port
== PORT_E
) {
1045 switch (info
->alternate_aux_channel
) {
1047 porte_aux_ctl_reg
= DPB_AUX_CH_CTL
;
1050 porte_aux_ctl_reg
= DPC_AUX_CH_CTL
;
1053 porte_aux_ctl_reg
= DPD_AUX_CH_CTL
;
1057 porte_aux_ctl_reg
= DPA_AUX_CH_CTL
;
1063 intel_dp
->aux_ch_ctl_reg
= DPA_AUX_CH_CTL
;
1067 intel_dp
->aux_ch_ctl_reg
= PCH_DPB_AUX_CH_CTL
;
1071 intel_dp
->aux_ch_ctl_reg
= PCH_DPC_AUX_CH_CTL
;
1075 intel_dp
->aux_ch_ctl_reg
= PCH_DPD_AUX_CH_CTL
;
1079 intel_dp
->aux_ch_ctl_reg
= porte_aux_ctl_reg
;
1087 * The AUX_CTL register is usually DP_CTL + 0x10.
1089 * On Haswell and Broadwell though:
1090 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1091 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1093 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1095 if (!IS_HASWELL(dev
) && !IS_BROADWELL(dev
) && port
!= PORT_E
)
1096 intel_dp
->aux_ch_ctl_reg
= intel_dp
->output_reg
+ 0x10;
1098 intel_dp
->aux
.name
= name
;
1099 intel_dp
->aux
.dev
= dev
->dev
;
1100 intel_dp
->aux
.transfer
= intel_dp_aux_transfer
;
1102 DRM_DEBUG_KMS("registering %s bus for %s\n", name
,
1103 connector
->base
.kdev
->kobj
.name
);
1105 ret
= drm_dp_aux_register(&intel_dp
->aux
);
1107 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1112 ret
= sysfs_create_link(&connector
->base
.kdev
->kobj
,
1113 &intel_dp
->aux
.ddc
.dev
.kobj
,
1114 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1116 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name
, ret
);
1117 drm_dp_aux_unregister(&intel_dp
->aux
);
1122 intel_dp_connector_unregister(struct intel_connector
*intel_connector
)
1124 struct intel_dp
*intel_dp
= intel_attached_dp(&intel_connector
->base
);
1126 if (!intel_connector
->mst_port
)
1127 sysfs_remove_link(&intel_connector
->base
.kdev
->kobj
,
1128 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1129 intel_connector_unregister(intel_connector
);
1133 skl_edp_set_pll_config(struct intel_crtc_state
*pipe_config
)
1137 memset(&pipe_config
->dpll_hw_state
, 0,
1138 sizeof(pipe_config
->dpll_hw_state
));
1140 pipe_config
->ddi_pll_sel
= SKL_DPLL0
;
1141 pipe_config
->dpll_hw_state
.cfgcr1
= 0;
1142 pipe_config
->dpll_hw_state
.cfgcr2
= 0;
1144 ctrl1
= DPLL_CTRL1_OVERRIDE(SKL_DPLL0
);
1145 switch (pipe_config
->port_clock
/ 2) {
1147 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810
,
1151 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350
,
1155 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700
,
1159 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620
,
1162 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1163 results in CDCLK change. Need to handle the change of CDCLK by
1164 disabling pipes and re-enabling them */
1166 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080
,
1170 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160
,
1175 pipe_config
->dpll_hw_state
.ctrl1
= ctrl1
;
1179 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state
*pipe_config
)
1181 memset(&pipe_config
->dpll_hw_state
, 0,
1182 sizeof(pipe_config
->dpll_hw_state
));
1184 switch (pipe_config
->port_clock
/ 2) {
1186 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_810
;
1189 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_1350
;
1192 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_2700
;
1198 intel_dp_sink_rates(struct intel_dp
*intel_dp
, const int **sink_rates
)
1200 if (intel_dp
->num_sink_rates
) {
1201 *sink_rates
= intel_dp
->sink_rates
;
1202 return intel_dp
->num_sink_rates
;
1205 *sink_rates
= default_rates
;
1207 return (intel_dp_max_link_bw(intel_dp
) >> 3) + 1;
1210 static bool intel_dp_source_supports_hbr2(struct drm_device
*dev
)
1212 /* WaDisableHBR2:skl */
1213 if (IS_SKYLAKE(dev
) && INTEL_REVID(dev
) <= SKL_REVID_B0
)
1216 if ((IS_HASWELL(dev
) && !IS_HSW_ULX(dev
)) || IS_BROADWELL(dev
) ||
1217 (INTEL_INFO(dev
)->gen
>= 9))
1224 intel_dp_source_rates(struct drm_device
*dev
, const int **source_rates
)
1228 if (IS_BROXTON(dev
)) {
1229 *source_rates
= bxt_rates
;
1230 size
= ARRAY_SIZE(bxt_rates
);
1231 } else if (IS_SKYLAKE(dev
)) {
1232 *source_rates
= skl_rates
;
1233 size
= ARRAY_SIZE(skl_rates
);
1235 *source_rates
= default_rates
;
1236 size
= ARRAY_SIZE(default_rates
);
1239 /* This depends on the fact that 5.4 is last value in the array */
1240 if (!intel_dp_source_supports_hbr2(dev
))
1247 intel_dp_set_clock(struct intel_encoder
*encoder
,
1248 struct intel_crtc_state
*pipe_config
)
1250 struct drm_device
*dev
= encoder
->base
.dev
;
1251 const struct dp_link_dpll
*divisor
= NULL
;
1255 divisor
= gen4_dpll
;
1256 count
= ARRAY_SIZE(gen4_dpll
);
1257 } else if (HAS_PCH_SPLIT(dev
)) {
1259 count
= ARRAY_SIZE(pch_dpll
);
1260 } else if (IS_CHERRYVIEW(dev
)) {
1262 count
= ARRAY_SIZE(chv_dpll
);
1263 } else if (IS_VALLEYVIEW(dev
)) {
1265 count
= ARRAY_SIZE(vlv_dpll
);
1268 if (divisor
&& count
) {
1269 for (i
= 0; i
< count
; i
++) {
1270 if (pipe_config
->port_clock
== divisor
[i
].clock
) {
1271 pipe_config
->dpll
= divisor
[i
].dpll
;
1272 pipe_config
->clock_set
= true;
1279 static int intersect_rates(const int *source_rates
, int source_len
,
1280 const int *sink_rates
, int sink_len
,
1283 int i
= 0, j
= 0, k
= 0;
1285 while (i
< source_len
&& j
< sink_len
) {
1286 if (source_rates
[i
] == sink_rates
[j
]) {
1287 if (WARN_ON(k
>= DP_MAX_SUPPORTED_RATES
))
1289 common_rates
[k
] = source_rates
[i
];
1293 } else if (source_rates
[i
] < sink_rates
[j
]) {
1302 static int intel_dp_common_rates(struct intel_dp
*intel_dp
,
1305 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1306 const int *source_rates
, *sink_rates
;
1307 int source_len
, sink_len
;
1309 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1310 source_len
= intel_dp_source_rates(dev
, &source_rates
);
1312 return intersect_rates(source_rates
, source_len
,
1313 sink_rates
, sink_len
,
1317 static void snprintf_int_array(char *str
, size_t len
,
1318 const int *array
, int nelem
)
1324 for (i
= 0; i
< nelem
; i
++) {
1325 int r
= snprintf(str
, len
, "%s%d", i
? ", " : "", array
[i
]);
1333 static void intel_dp_print_rates(struct intel_dp
*intel_dp
)
1335 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1336 const int *source_rates
, *sink_rates
;
1337 int source_len
, sink_len
, common_len
;
1338 int common_rates
[DP_MAX_SUPPORTED_RATES
];
1339 char str
[128]; /* FIXME: too big for stack? */
1341 if ((drm_debug
& DRM_UT_KMS
) == 0)
1344 source_len
= intel_dp_source_rates(dev
, &source_rates
);
1345 snprintf_int_array(str
, sizeof(str
), source_rates
, source_len
);
1346 DRM_DEBUG_KMS("source rates: %s\n", str
);
1348 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1349 snprintf_int_array(str
, sizeof(str
), sink_rates
, sink_len
);
1350 DRM_DEBUG_KMS("sink rates: %s\n", str
);
1352 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1353 snprintf_int_array(str
, sizeof(str
), common_rates
, common_len
);
1354 DRM_DEBUG_KMS("common rates: %s\n", str
);
1357 static int rate_to_index(int find
, const int *rates
)
1361 for (i
= 0; i
< DP_MAX_SUPPORTED_RATES
; ++i
)
1362 if (find
== rates
[i
])
1369 intel_dp_max_link_rate(struct intel_dp
*intel_dp
)
1371 int rates
[DP_MAX_SUPPORTED_RATES
] = {};
1374 len
= intel_dp_common_rates(intel_dp
, rates
);
1375 if (WARN_ON(len
<= 0))
1378 return rates
[rate_to_index(0, rates
) - 1];
1381 int intel_dp_rate_select(struct intel_dp
*intel_dp
, int rate
)
1383 return rate_to_index(rate
, intel_dp
->sink_rates
);
1387 intel_dp_compute_config(struct intel_encoder
*encoder
,
1388 struct intel_crtc_state
*pipe_config
)
1390 struct drm_device
*dev
= encoder
->base
.dev
;
1391 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1392 struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
1393 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1394 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1395 struct intel_crtc
*intel_crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
1396 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
1397 int lane_count
, clock
;
1398 int min_lane_count
= 1;
1399 int max_lane_count
= intel_dp_max_lane_count(intel_dp
);
1400 /* Conveniently, the link BW constants become indices with a shift...*/
1404 int link_avail
, link_clock
;
1405 int common_rates
[DP_MAX_SUPPORTED_RATES
] = {};
1408 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1410 /* No common link rates between source and sink */
1411 WARN_ON(common_len
<= 0);
1413 max_clock
= common_len
- 1;
1415 if (HAS_PCH_SPLIT(dev
) && !HAS_DDI(dev
) && port
!= PORT_A
)
1416 pipe_config
->has_pch_encoder
= true;
1418 pipe_config
->has_dp_encoder
= true;
1419 pipe_config
->has_drrs
= false;
1420 pipe_config
->has_audio
= intel_dp
->has_audio
&& port
!= PORT_A
;
1422 if (is_edp(intel_dp
) && intel_connector
->panel
.fixed_mode
) {
1423 intel_fixed_panel_mode(intel_connector
->panel
.fixed_mode
,
1426 if (INTEL_INFO(dev
)->gen
>= 9) {
1428 ret
= skl_update_scaler_crtc(pipe_config
);
1433 if (!HAS_PCH_SPLIT(dev
))
1434 intel_gmch_panel_fitting(intel_crtc
, pipe_config
,
1435 intel_connector
->panel
.fitting_mode
);
1437 intel_pch_panel_fitting(intel_crtc
, pipe_config
,
1438 intel_connector
->panel
.fitting_mode
);
1441 if (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
1444 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1445 "max bw %d pixel clock %iKHz\n",
1446 max_lane_count
, common_rates
[max_clock
],
1447 adjusted_mode
->crtc_clock
);
1449 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1450 * bpc in between. */
1451 bpp
= pipe_config
->pipe_bpp
;
1452 if (is_edp(intel_dp
)) {
1454 /* Get bpp from vbt only for panels that dont have bpp in edid */
1455 if (intel_connector
->base
.display_info
.bpc
== 0 &&
1456 (dev_priv
->vbt
.edp_bpp
&& dev_priv
->vbt
.edp_bpp
< bpp
)) {
1457 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1458 dev_priv
->vbt
.edp_bpp
);
1459 bpp
= dev_priv
->vbt
.edp_bpp
;
1463 * Use the maximum clock and number of lanes the eDP panel
1464 * advertizes being capable of. The panels are generally
1465 * designed to support only a single clock and lane
1466 * configuration, and typically these values correspond to the
1467 * native resolution of the panel.
1469 min_lane_count
= max_lane_count
;
1470 min_clock
= max_clock
;
1473 for (; bpp
>= 6*3; bpp
-= 2*3) {
1474 mode_rate
= intel_dp_link_required(adjusted_mode
->crtc_clock
,
1477 for (clock
= min_clock
; clock
<= max_clock
; clock
++) {
1478 for (lane_count
= min_lane_count
;
1479 lane_count
<= max_lane_count
;
1482 link_clock
= common_rates
[clock
];
1483 link_avail
= intel_dp_max_data_rate(link_clock
,
1486 if (mode_rate
<= link_avail
) {
1496 if (intel_dp
->color_range_auto
) {
1499 * CEA-861-E - 5.1 Default Encoding Parameters
1500 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1502 if (bpp
!= 18 && drm_match_cea_mode(adjusted_mode
) > 1)
1503 intel_dp
->color_range
= DP_COLOR_RANGE_16_235
;
1505 intel_dp
->color_range
= 0;
1508 if (intel_dp
->color_range
)
1509 pipe_config
->limited_color_range
= true;
1511 intel_dp
->lane_count
= lane_count
;
1513 if (intel_dp
->num_sink_rates
) {
1514 intel_dp
->link_bw
= 0;
1515 intel_dp
->rate_select
=
1516 intel_dp_rate_select(intel_dp
, common_rates
[clock
]);
1519 drm_dp_link_rate_to_bw_code(common_rates
[clock
]);
1520 intel_dp
->rate_select
= 0;
1523 pipe_config
->pipe_bpp
= bpp
;
1524 pipe_config
->port_clock
= common_rates
[clock
];
1526 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1527 intel_dp
->link_bw
, intel_dp
->lane_count
,
1528 pipe_config
->port_clock
, bpp
);
1529 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1530 mode_rate
, link_avail
);
1532 intel_link_compute_m_n(bpp
, lane_count
,
1533 adjusted_mode
->crtc_clock
,
1534 pipe_config
->port_clock
,
1535 &pipe_config
->dp_m_n
);
1537 if (intel_connector
->panel
.downclock_mode
!= NULL
&&
1538 dev_priv
->drrs
.type
== SEAMLESS_DRRS_SUPPORT
) {
1539 pipe_config
->has_drrs
= true;
1540 intel_link_compute_m_n(bpp
, lane_count
,
1541 intel_connector
->panel
.downclock_mode
->clock
,
1542 pipe_config
->port_clock
,
1543 &pipe_config
->dp_m2_n2
);
1546 if (IS_SKYLAKE(dev
) && is_edp(intel_dp
))
1547 skl_edp_set_pll_config(pipe_config
);
1548 else if (IS_BROXTON(dev
))
1549 /* handled in ddi */;
1550 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1551 hsw_dp_set_ddi_pll_sel(pipe_config
);
1553 intel_dp_set_clock(encoder
, pipe_config
);
1558 static void ironlake_set_pll_cpu_edp(struct intel_dp
*intel_dp
)
1560 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1561 struct intel_crtc
*crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
1562 struct drm_device
*dev
= crtc
->base
.dev
;
1563 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1566 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1567 crtc
->config
->port_clock
);
1568 dpa_ctl
= I915_READ(DP_A
);
1569 dpa_ctl
&= ~DP_PLL_FREQ_MASK
;
1571 if (crtc
->config
->port_clock
== 162000) {
1572 /* For a long time we've carried around a ILK-DevA w/a for the
1573 * 160MHz clock. If we're really unlucky, it's still required.
1575 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1576 dpa_ctl
|= DP_PLL_FREQ_160MHZ
;
1577 intel_dp
->DP
|= DP_PLL_FREQ_160MHZ
;
1579 dpa_ctl
|= DP_PLL_FREQ_270MHZ
;
1580 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
1583 I915_WRITE(DP_A
, dpa_ctl
);
1589 static void intel_dp_prepare(struct intel_encoder
*encoder
)
1591 struct drm_device
*dev
= encoder
->base
.dev
;
1592 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1593 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1594 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1595 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
1596 struct drm_display_mode
*adjusted_mode
= &crtc
->config
->base
.adjusted_mode
;
1599 * There are four kinds of DP registers:
1606 * IBX PCH and CPU are the same for almost everything,
1607 * except that the CPU DP PLL is configured in this
1610 * CPT PCH is quite different, having many bits moved
1611 * to the TRANS_DP_CTL register instead. That
1612 * configuration happens (oddly) in ironlake_pch_enable
1615 /* Preserve the BIOS-computed detected bit. This is
1616 * supposed to be read-only.
1618 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
1620 /* Handle DP bits in common between all three register formats */
1621 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
1622 intel_dp
->DP
|= DP_PORT_WIDTH(intel_dp
->lane_count
);
1624 if (crtc
->config
->has_audio
)
1625 intel_dp
->DP
|= DP_AUDIO_OUTPUT_ENABLE
;
1627 /* Split out the IBX/CPU vs CPT settings */
1629 if (IS_GEN7(dev
) && port
== PORT_A
) {
1630 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1631 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1632 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1633 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1634 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1636 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1637 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1639 intel_dp
->DP
|= crtc
->pipe
<< 29;
1640 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
1643 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1645 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
1646 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1647 trans_dp
|= TRANS_DP_ENH_FRAMING
;
1649 trans_dp
&= ~TRANS_DP_ENH_FRAMING
;
1650 I915_WRITE(TRANS_DP_CTL(crtc
->pipe
), trans_dp
);
1652 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
))
1653 intel_dp
->DP
|= intel_dp
->color_range
;
1655 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1656 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1657 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1658 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1659 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
1661 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1662 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1664 if (IS_CHERRYVIEW(dev
))
1665 intel_dp
->DP
|= DP_PIPE_SELECT_CHV(crtc
->pipe
);
1666 else if (crtc
->pipe
== PIPE_B
)
1667 intel_dp
->DP
|= DP_PIPEB_SELECT
;
1671 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1672 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1674 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1675 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1677 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1678 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1680 static void wait_panel_status(struct intel_dp
*intel_dp
,
1684 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1685 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1686 u32 pp_stat_reg
, pp_ctrl_reg
;
1688 lockdep_assert_held(&dev_priv
->pps_mutex
);
1690 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1691 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1693 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1695 I915_READ(pp_stat_reg
),
1696 I915_READ(pp_ctrl_reg
));
1698 if (_wait_for((I915_READ(pp_stat_reg
) & mask
) == value
, 5000, 10)) {
1699 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1700 I915_READ(pp_stat_reg
),
1701 I915_READ(pp_ctrl_reg
));
1704 DRM_DEBUG_KMS("Wait complete\n");
1707 static void wait_panel_on(struct intel_dp
*intel_dp
)
1709 DRM_DEBUG_KMS("Wait for panel power on\n");
1710 wait_panel_status(intel_dp
, IDLE_ON_MASK
, IDLE_ON_VALUE
);
1713 static void wait_panel_off(struct intel_dp
*intel_dp
)
1715 DRM_DEBUG_KMS("Wait for panel power off time\n");
1716 wait_panel_status(intel_dp
, IDLE_OFF_MASK
, IDLE_OFF_VALUE
);
1719 static void wait_panel_power_cycle(struct intel_dp
*intel_dp
)
1721 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1723 /* When we disable the VDD override bit last we have to do the manual
1725 wait_remaining_ms_from_jiffies(intel_dp
->last_power_cycle
,
1726 intel_dp
->panel_power_cycle_delay
);
1728 wait_panel_status(intel_dp
, IDLE_CYCLE_MASK
, IDLE_CYCLE_VALUE
);
1731 static void wait_backlight_on(struct intel_dp
*intel_dp
)
1733 wait_remaining_ms_from_jiffies(intel_dp
->last_power_on
,
1734 intel_dp
->backlight_on_delay
);
1737 static void edp_wait_backlight_off(struct intel_dp
*intel_dp
)
1739 wait_remaining_ms_from_jiffies(intel_dp
->last_backlight_off
,
1740 intel_dp
->backlight_off_delay
);
1743 /* Read the current pp_control value, unlocking the register if it
1747 static u32
ironlake_get_pp_control(struct intel_dp
*intel_dp
)
1749 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1750 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1753 lockdep_assert_held(&dev_priv
->pps_mutex
);
1755 control
= I915_READ(_pp_ctrl_reg(intel_dp
));
1756 if (!IS_BROXTON(dev
)) {
1757 control
&= ~PANEL_UNLOCK_MASK
;
1758 control
|= PANEL_UNLOCK_REGS
;
1764 * Must be paired with edp_panel_vdd_off().
1765 * Must hold pps_mutex around the whole on/off sequence.
1766 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1768 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1770 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1771 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1772 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1773 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1774 enum intel_display_power_domain power_domain
;
1776 u32 pp_stat_reg
, pp_ctrl_reg
;
1777 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
1779 lockdep_assert_held(&dev_priv
->pps_mutex
);
1781 if (!is_edp(intel_dp
))
1784 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
1785 intel_dp
->want_panel_vdd
= true;
1787 if (edp_have_panel_vdd(intel_dp
))
1788 return need_to_disable
;
1790 power_domain
= intel_display_port_power_domain(intel_encoder
);
1791 intel_display_power_get(dev_priv
, power_domain
);
1793 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1794 port_name(intel_dig_port
->port
));
1796 if (!edp_have_panel_power(intel_dp
))
1797 wait_panel_power_cycle(intel_dp
);
1799 pp
= ironlake_get_pp_control(intel_dp
);
1800 pp
|= EDP_FORCE_VDD
;
1802 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1803 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1805 I915_WRITE(pp_ctrl_reg
, pp
);
1806 POSTING_READ(pp_ctrl_reg
);
1807 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1808 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1810 * If the panel wasn't on, delay before accessing aux channel
1812 if (!edp_have_panel_power(intel_dp
)) {
1813 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1814 port_name(intel_dig_port
->port
));
1815 msleep(intel_dp
->panel_power_up_delay
);
1818 return need_to_disable
;
1822 * Must be paired with intel_edp_panel_vdd_off() or
1823 * intel_edp_panel_off().
1824 * Nested calls to these functions are not allowed since
1825 * we drop the lock. Caller must use some higher level
1826 * locking to prevent nested calls from other threads.
1828 void intel_edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1832 if (!is_edp(intel_dp
))
1836 vdd
= edp_panel_vdd_on(intel_dp
);
1837 pps_unlock(intel_dp
);
1839 I915_STATE_WARN(!vdd
, "eDP port %c VDD already requested on\n",
1840 port_name(dp_to_dig_port(intel_dp
)->port
));
1843 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
1845 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1846 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1847 struct intel_digital_port
*intel_dig_port
=
1848 dp_to_dig_port(intel_dp
);
1849 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1850 enum intel_display_power_domain power_domain
;
1852 u32 pp_stat_reg
, pp_ctrl_reg
;
1854 lockdep_assert_held(&dev_priv
->pps_mutex
);
1856 WARN_ON(intel_dp
->want_panel_vdd
);
1858 if (!edp_have_panel_vdd(intel_dp
))
1861 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1862 port_name(intel_dig_port
->port
));
1864 pp
= ironlake_get_pp_control(intel_dp
);
1865 pp
&= ~EDP_FORCE_VDD
;
1867 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1868 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1870 I915_WRITE(pp_ctrl_reg
, pp
);
1871 POSTING_READ(pp_ctrl_reg
);
1873 /* Make sure sequencer is idle before allowing subsequent activity */
1874 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1875 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1877 if ((pp
& POWER_TARGET_ON
) == 0)
1878 intel_dp
->last_power_cycle
= jiffies
;
1880 power_domain
= intel_display_port_power_domain(intel_encoder
);
1881 intel_display_power_put(dev_priv
, power_domain
);
1884 static void edp_panel_vdd_work(struct work_struct
*__work
)
1886 struct intel_dp
*intel_dp
= container_of(to_delayed_work(__work
),
1887 struct intel_dp
, panel_vdd_work
);
1890 if (!intel_dp
->want_panel_vdd
)
1891 edp_panel_vdd_off_sync(intel_dp
);
1892 pps_unlock(intel_dp
);
1895 static void edp_panel_vdd_schedule_off(struct intel_dp
*intel_dp
)
1897 unsigned long delay
;
1900 * Queue the timer to fire a long time from now (relative to the power
1901 * down delay) to keep the panel power up across a sequence of
1904 delay
= msecs_to_jiffies(intel_dp
->panel_power_cycle_delay
* 5);
1905 schedule_delayed_work(&intel_dp
->panel_vdd_work
, delay
);
1909 * Must be paired with edp_panel_vdd_on().
1910 * Must hold pps_mutex around the whole on/off sequence.
1911 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1913 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
)
1915 struct drm_i915_private
*dev_priv
=
1916 intel_dp_to_dev(intel_dp
)->dev_private
;
1918 lockdep_assert_held(&dev_priv
->pps_mutex
);
1920 if (!is_edp(intel_dp
))
1923 I915_STATE_WARN(!intel_dp
->want_panel_vdd
, "eDP port %c VDD not forced on",
1924 port_name(dp_to_dig_port(intel_dp
)->port
));
1926 intel_dp
->want_panel_vdd
= false;
1929 edp_panel_vdd_off_sync(intel_dp
);
1931 edp_panel_vdd_schedule_off(intel_dp
);
1934 static void edp_panel_on(struct intel_dp
*intel_dp
)
1936 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1937 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1941 lockdep_assert_held(&dev_priv
->pps_mutex
);
1943 if (!is_edp(intel_dp
))
1946 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1947 port_name(dp_to_dig_port(intel_dp
)->port
));
1949 if (WARN(edp_have_panel_power(intel_dp
),
1950 "eDP port %c panel power already on\n",
1951 port_name(dp_to_dig_port(intel_dp
)->port
)))
1954 wait_panel_power_cycle(intel_dp
);
1956 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1957 pp
= ironlake_get_pp_control(intel_dp
);
1959 /* ILK workaround: disable reset around power sequence */
1960 pp
&= ~PANEL_POWER_RESET
;
1961 I915_WRITE(pp_ctrl_reg
, pp
);
1962 POSTING_READ(pp_ctrl_reg
);
1965 pp
|= POWER_TARGET_ON
;
1967 pp
|= PANEL_POWER_RESET
;
1969 I915_WRITE(pp_ctrl_reg
, pp
);
1970 POSTING_READ(pp_ctrl_reg
);
1972 wait_panel_on(intel_dp
);
1973 intel_dp
->last_power_on
= jiffies
;
1976 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
1977 I915_WRITE(pp_ctrl_reg
, pp
);
1978 POSTING_READ(pp_ctrl_reg
);
1982 void intel_edp_panel_on(struct intel_dp
*intel_dp
)
1984 if (!is_edp(intel_dp
))
1988 edp_panel_on(intel_dp
);
1989 pps_unlock(intel_dp
);
1993 static void edp_panel_off(struct intel_dp
*intel_dp
)
1995 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1996 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1997 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1998 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1999 enum intel_display_power_domain power_domain
;
2003 lockdep_assert_held(&dev_priv
->pps_mutex
);
2005 if (!is_edp(intel_dp
))
2008 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2009 port_name(dp_to_dig_port(intel_dp
)->port
));
2011 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
2012 port_name(dp_to_dig_port(intel_dp
)->port
));
2014 pp
= ironlake_get_pp_control(intel_dp
);
2015 /* We need to switch off panel power _and_ force vdd, for otherwise some
2016 * panels get very unhappy and cease to work. */
2017 pp
&= ~(POWER_TARGET_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
2020 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2022 intel_dp
->want_panel_vdd
= false;
2024 I915_WRITE(pp_ctrl_reg
, pp
);
2025 POSTING_READ(pp_ctrl_reg
);
2027 intel_dp
->last_power_cycle
= jiffies
;
2028 wait_panel_off(intel_dp
);
2030 /* We got a reference when we enabled the VDD. */
2031 power_domain
= intel_display_port_power_domain(intel_encoder
);
2032 intel_display_power_put(dev_priv
, power_domain
);
2035 void intel_edp_panel_off(struct intel_dp
*intel_dp
)
2037 if (!is_edp(intel_dp
))
2041 edp_panel_off(intel_dp
);
2042 pps_unlock(intel_dp
);
2045 /* Enable backlight in the panel power control. */
2046 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2048 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2049 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2050 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2055 * If we enable the backlight right away following a panel power
2056 * on, we may see slight flicker as the panel syncs with the eDP
2057 * link. So delay a bit to make sure the image is solid before
2058 * allowing it to appear.
2060 wait_backlight_on(intel_dp
);
2064 pp
= ironlake_get_pp_control(intel_dp
);
2065 pp
|= EDP_BLC_ENABLE
;
2067 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2069 I915_WRITE(pp_ctrl_reg
, pp
);
2070 POSTING_READ(pp_ctrl_reg
);
2072 pps_unlock(intel_dp
);
2075 /* Enable backlight PWM and backlight PP control. */
2076 void intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2078 if (!is_edp(intel_dp
))
2081 DRM_DEBUG_KMS("\n");
2083 intel_panel_enable_backlight(intel_dp
->attached_connector
);
2084 _intel_edp_backlight_on(intel_dp
);
2087 /* Disable backlight in the panel power control. */
2088 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2090 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2091 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2095 if (!is_edp(intel_dp
))
2100 pp
= ironlake_get_pp_control(intel_dp
);
2101 pp
&= ~EDP_BLC_ENABLE
;
2103 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2105 I915_WRITE(pp_ctrl_reg
, pp
);
2106 POSTING_READ(pp_ctrl_reg
);
2108 pps_unlock(intel_dp
);
2110 intel_dp
->last_backlight_off
= jiffies
;
2111 edp_wait_backlight_off(intel_dp
);
2114 /* Disable backlight PP control and backlight PWM. */
2115 void intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2117 if (!is_edp(intel_dp
))
2120 DRM_DEBUG_KMS("\n");
2122 _intel_edp_backlight_off(intel_dp
);
2123 intel_panel_disable_backlight(intel_dp
->attached_connector
);
2127 * Hook for controlling the panel power control backlight through the bl_power
2128 * sysfs attribute. Take care to handle multiple calls.
2130 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2133 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
2137 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2138 pps_unlock(intel_dp
);
2140 if (is_enabled
== enable
)
2143 DRM_DEBUG_KMS("panel power control backlight %s\n",
2144 enable
? "enable" : "disable");
2147 _intel_edp_backlight_on(intel_dp
);
2149 _intel_edp_backlight_off(intel_dp
);
2152 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
)
2154 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2155 struct drm_crtc
*crtc
= intel_dig_port
->base
.base
.crtc
;
2156 struct drm_device
*dev
= crtc
->dev
;
2157 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2160 assert_pipe_disabled(dev_priv
,
2161 to_intel_crtc(crtc
)->pipe
);
2163 DRM_DEBUG_KMS("\n");
2164 dpa_ctl
= I915_READ(DP_A
);
2165 WARN(dpa_ctl
& DP_PLL_ENABLE
, "dp pll on, should be off\n");
2166 WARN(dpa_ctl
& DP_PORT_EN
, "dp port still on, should be off\n");
2168 /* We don't adjust intel_dp->DP while tearing down the link, to
2169 * facilitate link retraining (e.g. after hotplug). Hence clear all
2170 * enable bits here to ensure that we don't enable too much. */
2171 intel_dp
->DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
2172 intel_dp
->DP
|= DP_PLL_ENABLE
;
2173 I915_WRITE(DP_A
, intel_dp
->DP
);
2178 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
)
2180 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2181 struct drm_crtc
*crtc
= intel_dig_port
->base
.base
.crtc
;
2182 struct drm_device
*dev
= crtc
->dev
;
2183 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2186 assert_pipe_disabled(dev_priv
,
2187 to_intel_crtc(crtc
)->pipe
);
2189 dpa_ctl
= I915_READ(DP_A
);
2190 WARN((dpa_ctl
& DP_PLL_ENABLE
) == 0,
2191 "dp pll off, should be on\n");
2192 WARN(dpa_ctl
& DP_PORT_EN
, "dp port still on, should be off\n");
2194 /* We can't rely on the value tracked for the DP register in
2195 * intel_dp->DP because link_down must not change that (otherwise link
2196 * re-training will fail. */
2197 dpa_ctl
&= ~DP_PLL_ENABLE
;
2198 I915_WRITE(DP_A
, dpa_ctl
);
2203 /* If the sink supports it, try to set the power state appropriately */
2204 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2208 /* Should have a valid DPCD by this point */
2209 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2212 if (mode
!= DRM_MODE_DPMS_ON
) {
2213 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2217 * When turning on, we need to retry for 1ms to give the sink
2220 for (i
= 0; i
< 3; i
++) {
2221 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2230 DRM_DEBUG_KMS("failed to %s sink power state\n",
2231 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
2234 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
2237 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2238 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2239 struct drm_device
*dev
= encoder
->base
.dev
;
2240 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2241 enum intel_display_power_domain power_domain
;
2244 power_domain
= intel_display_port_power_domain(encoder
);
2245 if (!intel_display_power_is_enabled(dev_priv
, power_domain
))
2248 tmp
= I915_READ(intel_dp
->output_reg
);
2250 if (!(tmp
& DP_PORT_EN
))
2253 if (IS_GEN7(dev
) && port
== PORT_A
) {
2254 *pipe
= PORT_TO_PIPE_CPT(tmp
);
2255 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2258 for_each_pipe(dev_priv
, p
) {
2259 u32 trans_dp
= I915_READ(TRANS_DP_CTL(p
));
2260 if (TRANS_DP_PIPE_TO_PORT(trans_dp
) == port
) {
2266 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2267 intel_dp
->output_reg
);
2268 } else if (IS_CHERRYVIEW(dev
)) {
2269 *pipe
= DP_PORT_TO_PIPE_CHV(tmp
);
2271 *pipe
= PORT_TO_PIPE(tmp
);
2277 static void intel_dp_get_config(struct intel_encoder
*encoder
,
2278 struct intel_crtc_state
*pipe_config
)
2280 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2282 struct drm_device
*dev
= encoder
->base
.dev
;
2283 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2284 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2285 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2288 tmp
= I915_READ(intel_dp
->output_reg
);
2290 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
2292 if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2293 tmp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2294 if (tmp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
2295 flags
|= DRM_MODE_FLAG_PHSYNC
;
2297 flags
|= DRM_MODE_FLAG_NHSYNC
;
2299 if (tmp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
2300 flags
|= DRM_MODE_FLAG_PVSYNC
;
2302 flags
|= DRM_MODE_FLAG_NVSYNC
;
2304 if (tmp
& DP_SYNC_HS_HIGH
)
2305 flags
|= DRM_MODE_FLAG_PHSYNC
;
2307 flags
|= DRM_MODE_FLAG_NHSYNC
;
2309 if (tmp
& DP_SYNC_VS_HIGH
)
2310 flags
|= DRM_MODE_FLAG_PVSYNC
;
2312 flags
|= DRM_MODE_FLAG_NVSYNC
;
2315 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
2317 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
2318 tmp
& DP_COLOR_RANGE_16_235
)
2319 pipe_config
->limited_color_range
= true;
2321 pipe_config
->has_dp_encoder
= true;
2323 intel_dp_get_m_n(crtc
, pipe_config
);
2325 if (port
== PORT_A
) {
2326 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_160MHZ
)
2327 pipe_config
->port_clock
= 162000;
2329 pipe_config
->port_clock
= 270000;
2332 dotclock
= intel_dotclock_calculate(pipe_config
->port_clock
,
2333 &pipe_config
->dp_m_n
);
2335 if (HAS_PCH_SPLIT(dev_priv
->dev
) && port
!= PORT_A
)
2336 ironlake_check_encoder_dotclock(pipe_config
, dotclock
);
2338 pipe_config
->base
.adjusted_mode
.crtc_clock
= dotclock
;
2340 if (is_edp(intel_dp
) && dev_priv
->vbt
.edp_bpp
&&
2341 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp_bpp
) {
2343 * This is a big fat ugly hack.
2345 * Some machines in UEFI boot mode provide us a VBT that has 18
2346 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2347 * unknown we fail to light up. Yet the same BIOS boots up with
2348 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2349 * max, not what it tells us to use.
2351 * Note: This will still be broken if the eDP panel is not lit
2352 * up by the BIOS, and thus we can't get the mode at module
2355 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2356 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp_bpp
);
2357 dev_priv
->vbt
.edp_bpp
= pipe_config
->pipe_bpp
;
2361 static void intel_disable_dp(struct intel_encoder
*encoder
)
2363 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2364 struct drm_device
*dev
= encoder
->base
.dev
;
2365 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2367 if (crtc
->config
->has_audio
)
2368 intel_audio_codec_disable(encoder
);
2370 if (HAS_PSR(dev
) && !HAS_DDI(dev
))
2371 intel_psr_disable(intel_dp
);
2373 /* Make sure the panel is off before trying to change the mode. But also
2374 * ensure that we have vdd while we switch off the panel. */
2375 intel_edp_panel_vdd_on(intel_dp
);
2376 intel_edp_backlight_off(intel_dp
);
2377 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_OFF
);
2378 intel_edp_panel_off(intel_dp
);
2380 /* disable the port before the pipe on g4x */
2381 if (INTEL_INFO(dev
)->gen
< 5)
2382 intel_dp_link_down(intel_dp
);
2385 static void ilk_post_disable_dp(struct intel_encoder
*encoder
)
2387 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2388 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2390 intel_dp_link_down(intel_dp
);
2392 ironlake_edp_pll_off(intel_dp
);
2395 static void vlv_post_disable_dp(struct intel_encoder
*encoder
)
2397 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2399 intel_dp_link_down(intel_dp
);
2402 static void chv_post_disable_dp(struct intel_encoder
*encoder
)
2404 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2405 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2406 struct drm_device
*dev
= encoder
->base
.dev
;
2407 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2408 struct intel_crtc
*intel_crtc
=
2409 to_intel_crtc(encoder
->base
.crtc
);
2410 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2411 enum pipe pipe
= intel_crtc
->pipe
;
2414 intel_dp_link_down(intel_dp
);
2416 mutex_lock(&dev_priv
->sb_lock
);
2418 /* Propagate soft reset to data lane reset */
2419 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW1(ch
));
2420 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2421 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW1(ch
), val
);
2423 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW1(ch
));
2424 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2425 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW1(ch
), val
);
2427 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW0(ch
));
2428 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2429 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW0(ch
), val
);
2431 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW0(ch
));
2432 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2433 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW0(ch
), val
);
2435 mutex_unlock(&dev_priv
->sb_lock
);
2439 _intel_dp_set_link_train(struct intel_dp
*intel_dp
,
2441 uint8_t dp_train_pat
)
2443 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2444 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2445 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2446 enum port port
= intel_dig_port
->port
;
2449 uint32_t temp
= I915_READ(DP_TP_CTL(port
));
2451 if (dp_train_pat
& DP_LINK_SCRAMBLING_DISABLE
)
2452 temp
|= DP_TP_CTL_SCRAMBLE_DISABLE
;
2454 temp
&= ~DP_TP_CTL_SCRAMBLE_DISABLE
;
2456 temp
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
2457 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2458 case DP_TRAINING_PATTERN_DISABLE
:
2459 temp
|= DP_TP_CTL_LINK_TRAIN_NORMAL
;
2462 case DP_TRAINING_PATTERN_1
:
2463 temp
|= DP_TP_CTL_LINK_TRAIN_PAT1
;
2465 case DP_TRAINING_PATTERN_2
:
2466 temp
|= DP_TP_CTL_LINK_TRAIN_PAT2
;
2468 case DP_TRAINING_PATTERN_3
:
2469 temp
|= DP_TP_CTL_LINK_TRAIN_PAT3
;
2472 I915_WRITE(DP_TP_CTL(port
), temp
);
2474 } else if ((IS_GEN7(dev
) && port
== PORT_A
) ||
2475 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
2476 *DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
2478 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2479 case DP_TRAINING_PATTERN_DISABLE
:
2480 *DP
|= DP_LINK_TRAIN_OFF_CPT
;
2482 case DP_TRAINING_PATTERN_1
:
2483 *DP
|= DP_LINK_TRAIN_PAT_1_CPT
;
2485 case DP_TRAINING_PATTERN_2
:
2486 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2488 case DP_TRAINING_PATTERN_3
:
2489 DRM_ERROR("DP training pattern 3 not supported\n");
2490 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2495 if (IS_CHERRYVIEW(dev
))
2496 *DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
2498 *DP
&= ~DP_LINK_TRAIN_MASK
;
2500 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2501 case DP_TRAINING_PATTERN_DISABLE
:
2502 *DP
|= DP_LINK_TRAIN_OFF
;
2504 case DP_TRAINING_PATTERN_1
:
2505 *DP
|= DP_LINK_TRAIN_PAT_1
;
2507 case DP_TRAINING_PATTERN_2
:
2508 *DP
|= DP_LINK_TRAIN_PAT_2
;
2510 case DP_TRAINING_PATTERN_3
:
2511 if (IS_CHERRYVIEW(dev
)) {
2512 *DP
|= DP_LINK_TRAIN_PAT_3_CHV
;
2514 DRM_ERROR("DP training pattern 3 not supported\n");
2515 *DP
|= DP_LINK_TRAIN_PAT_2
;
2522 static void intel_dp_enable_port(struct intel_dp
*intel_dp
)
2524 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2525 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2527 /* enable with pattern 1 (as per spec) */
2528 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
2529 DP_TRAINING_PATTERN_1
);
2531 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2532 POSTING_READ(intel_dp
->output_reg
);
2535 * Magic for VLV/CHV. We _must_ first set up the register
2536 * without actually enabling the port, and then do another
2537 * write to enable the port. Otherwise link training will
2538 * fail when the power sequencer is freshly used for this port.
2540 intel_dp
->DP
|= DP_PORT_EN
;
2542 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2543 POSTING_READ(intel_dp
->output_reg
);
2546 static void intel_enable_dp(struct intel_encoder
*encoder
)
2548 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2549 struct drm_device
*dev
= encoder
->base
.dev
;
2550 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2551 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2552 uint32_t dp_reg
= I915_READ(intel_dp
->output_reg
);
2553 unsigned int lane_mask
= 0x0;
2555 if (WARN_ON(dp_reg
& DP_PORT_EN
))
2560 if (IS_VALLEYVIEW(dev
))
2561 vlv_init_panel_power_sequencer(intel_dp
);
2563 intel_dp_enable_port(intel_dp
);
2565 edp_panel_vdd_on(intel_dp
);
2566 edp_panel_on(intel_dp
);
2567 edp_panel_vdd_off(intel_dp
, true);
2569 pps_unlock(intel_dp
);
2571 if (IS_VALLEYVIEW(dev
))
2572 vlv_wait_port_ready(dev_priv
, dp_to_dig_port(intel_dp
),
2575 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_ON
);
2576 intel_dp_start_link_train(intel_dp
);
2577 intel_dp_complete_link_train(intel_dp
);
2578 intel_dp_stop_link_train(intel_dp
);
2580 if (crtc
->config
->has_audio
) {
2581 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2582 pipe_name(crtc
->pipe
));
2583 intel_audio_codec_enable(encoder
);
2587 static void g4x_enable_dp(struct intel_encoder
*encoder
)
2589 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2591 intel_enable_dp(encoder
);
2592 intel_edp_backlight_on(intel_dp
);
2595 static void vlv_enable_dp(struct intel_encoder
*encoder
)
2597 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2599 intel_edp_backlight_on(intel_dp
);
2600 intel_psr_enable(intel_dp
);
2603 static void g4x_pre_enable_dp(struct intel_encoder
*encoder
)
2605 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2606 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2608 intel_dp_prepare(encoder
);
2610 /* Only ilk+ has port A */
2611 if (dport
->port
== PORT_A
) {
2612 ironlake_set_pll_cpu_edp(intel_dp
);
2613 ironlake_edp_pll_on(intel_dp
);
2617 static void vlv_detach_power_sequencer(struct intel_dp
*intel_dp
)
2619 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2620 struct drm_i915_private
*dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
2621 enum pipe pipe
= intel_dp
->pps_pipe
;
2622 int pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
2624 edp_panel_vdd_off_sync(intel_dp
);
2627 * VLV seems to get confused when multiple power seqeuencers
2628 * have the same port selected (even if only one has power/vdd
2629 * enabled). The failure manifests as vlv_wait_port_ready() failing
2630 * CHV on the other hand doesn't seem to mind having the same port
2631 * selected in multiple power seqeuencers, but let's clear the
2632 * port select always when logically disconnecting a power sequencer
2635 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2636 pipe_name(pipe
), port_name(intel_dig_port
->port
));
2637 I915_WRITE(pp_on_reg
, 0);
2638 POSTING_READ(pp_on_reg
);
2640 intel_dp
->pps_pipe
= INVALID_PIPE
;
2643 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
2646 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2647 struct intel_encoder
*encoder
;
2649 lockdep_assert_held(&dev_priv
->pps_mutex
);
2651 if (WARN_ON(pipe
!= PIPE_A
&& pipe
!= PIPE_B
))
2654 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
2656 struct intel_dp
*intel_dp
;
2659 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
2662 intel_dp
= enc_to_intel_dp(&encoder
->base
);
2663 port
= dp_to_dig_port(intel_dp
)->port
;
2665 if (intel_dp
->pps_pipe
!= pipe
)
2668 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2669 pipe_name(pipe
), port_name(port
));
2671 WARN(encoder
->base
.crtc
,
2672 "stealing pipe %c power sequencer from active eDP port %c\n",
2673 pipe_name(pipe
), port_name(port
));
2675 /* make sure vdd is off before we steal it */
2676 vlv_detach_power_sequencer(intel_dp
);
2680 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
)
2682 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2683 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
2684 struct drm_device
*dev
= encoder
->base
.dev
;
2685 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2686 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2688 lockdep_assert_held(&dev_priv
->pps_mutex
);
2690 if (!is_edp(intel_dp
))
2693 if (intel_dp
->pps_pipe
== crtc
->pipe
)
2697 * If another power sequencer was being used on this
2698 * port previously make sure to turn off vdd there while
2699 * we still have control of it.
2701 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
2702 vlv_detach_power_sequencer(intel_dp
);
2705 * We may be stealing the power
2706 * sequencer from another port.
2708 vlv_steal_power_sequencer(dev
, crtc
->pipe
);
2710 /* now it's all ours */
2711 intel_dp
->pps_pipe
= crtc
->pipe
;
2713 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2714 pipe_name(intel_dp
->pps_pipe
), port_name(intel_dig_port
->port
));
2716 /* init power sequencer on this pipe and port */
2717 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
2718 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
2721 static void vlv_pre_enable_dp(struct intel_encoder
*encoder
)
2723 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2724 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2725 struct drm_device
*dev
= encoder
->base
.dev
;
2726 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2727 struct intel_crtc
*intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
2728 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2729 int pipe
= intel_crtc
->pipe
;
2732 mutex_lock(&dev_priv
->sb_lock
);
2734 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(port
));
2741 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW8(port
), val
);
2742 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW14(port
), 0x00760018);
2743 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW23(port
), 0x00400888);
2745 mutex_unlock(&dev_priv
->sb_lock
);
2747 intel_enable_dp(encoder
);
2750 static void vlv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
2752 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
2753 struct drm_device
*dev
= encoder
->base
.dev
;
2754 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2755 struct intel_crtc
*intel_crtc
=
2756 to_intel_crtc(encoder
->base
.crtc
);
2757 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2758 int pipe
= intel_crtc
->pipe
;
2760 intel_dp_prepare(encoder
);
2762 /* Program Tx lane resets to default */
2763 mutex_lock(&dev_priv
->sb_lock
);
2764 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW0(port
),
2765 DPIO_PCS_TX_LANE2_RESET
|
2766 DPIO_PCS_TX_LANE1_RESET
);
2767 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW1(port
),
2768 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN
|
2769 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN
|
2770 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT
) |
2771 DPIO_PCS_CLK_SOFT_RESET
);
2773 /* Fix up inter-pair skew failure */
2774 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW12(port
), 0x00750f00);
2775 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW11(port
), 0x00001500);
2776 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW14(port
), 0x40400000);
2777 mutex_unlock(&dev_priv
->sb_lock
);
2780 static void chv_pre_enable_dp(struct intel_encoder
*encoder
)
2782 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2783 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2784 struct drm_device
*dev
= encoder
->base
.dev
;
2785 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2786 struct intel_crtc
*intel_crtc
=
2787 to_intel_crtc(encoder
->base
.crtc
);
2788 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2789 int pipe
= intel_crtc
->pipe
;
2790 int data
, i
, stagger
;
2793 mutex_lock(&dev_priv
->sb_lock
);
2795 /* allow hardware to manage TX FIFO reset source */
2796 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
2797 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
2798 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
2800 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
2801 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
2802 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
2804 /* Deassert soft data lane reset*/
2805 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW1(ch
));
2806 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2807 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW1(ch
), val
);
2809 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW1(ch
));
2810 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2811 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW1(ch
), val
);
2813 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW0(ch
));
2814 val
|= (DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2815 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW0(ch
), val
);
2817 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW0(ch
));
2818 val
|= (DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2819 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW0(ch
), val
);
2821 /* Program Tx lane latency optimal setting*/
2822 for (i
= 0; i
< 4; i
++) {
2823 /* Set the upar bit */
2824 data
= (i
== 1) ? 0x0 : 0x1;
2825 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW14(ch
, i
),
2826 data
<< DPIO_UPAR_SHIFT
);
2829 /* Data lane stagger programming */
2830 if (intel_crtc
->config
->port_clock
> 270000)
2832 else if (intel_crtc
->config
->port_clock
> 135000)
2834 else if (intel_crtc
->config
->port_clock
> 67500)
2836 else if (intel_crtc
->config
->port_clock
> 33750)
2841 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
2842 val
|= DPIO_TX2_STAGGER_MASK(0x1f);
2843 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
2845 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
2846 val
|= DPIO_TX2_STAGGER_MASK(0x1f);
2847 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
2849 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW12(ch
),
2850 DPIO_LANESTAGGER_STRAP(stagger
) |
2851 DPIO_LANESTAGGER_STRAP_OVRD
|
2852 DPIO_TX1_STAGGER_MASK(0x1f) |
2853 DPIO_TX1_STAGGER_MULT(6) |
2854 DPIO_TX2_STAGGER_MULT(0));
2856 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW12(ch
),
2857 DPIO_LANESTAGGER_STRAP(stagger
) |
2858 DPIO_LANESTAGGER_STRAP_OVRD
|
2859 DPIO_TX1_STAGGER_MASK(0x1f) |
2860 DPIO_TX1_STAGGER_MULT(7) |
2861 DPIO_TX2_STAGGER_MULT(5));
2863 mutex_unlock(&dev_priv
->sb_lock
);
2865 intel_enable_dp(encoder
);
2868 static void chv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
2870 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
2871 struct drm_device
*dev
= encoder
->base
.dev
;
2872 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2873 struct intel_crtc
*intel_crtc
=
2874 to_intel_crtc(encoder
->base
.crtc
);
2875 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2876 enum pipe pipe
= intel_crtc
->pipe
;
2879 intel_dp_prepare(encoder
);
2881 mutex_lock(&dev_priv
->sb_lock
);
2883 /* program left/right clock distribution */
2884 if (pipe
!= PIPE_B
) {
2885 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
);
2886 val
&= ~(CHV_BUFLEFTENA1_MASK
| CHV_BUFRIGHTENA1_MASK
);
2888 val
|= CHV_BUFLEFTENA1_FORCE
;
2890 val
|= CHV_BUFRIGHTENA1_FORCE
;
2891 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
, val
);
2893 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
);
2894 val
&= ~(CHV_BUFLEFTENA2_MASK
| CHV_BUFRIGHTENA2_MASK
);
2896 val
|= CHV_BUFLEFTENA2_FORCE
;
2898 val
|= CHV_BUFRIGHTENA2_FORCE
;
2899 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
, val
);
2902 /* program clock channel usage */
2903 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(ch
));
2904 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
2906 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
2908 val
|= CHV_PCS_USEDCLKCHANNEL
;
2909 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW8(ch
), val
);
2911 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW8(ch
));
2912 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
2914 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
2916 val
|= CHV_PCS_USEDCLKCHANNEL
;
2917 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW8(ch
), val
);
2920 * This a a bit weird since generally CL
2921 * matches the pipe, but here we need to
2922 * pick the CL based on the port.
2924 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW19(ch
));
2926 val
&= ~CHV_CMN_USEDCLKCHANNEL
;
2928 val
|= CHV_CMN_USEDCLKCHANNEL
;
2929 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW19(ch
), val
);
2931 mutex_unlock(&dev_priv
->sb_lock
);
2935 * Native read with retry for link status and receiver capability reads for
2936 * cases where the sink may still be asleep.
2938 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2939 * supposed to retry 3 times per the spec.
2942 intel_dp_dpcd_read_wake(struct drm_dp_aux
*aux
, unsigned int offset
,
2943 void *buffer
, size_t size
)
2949 * Sometime we just get the same incorrect byte repeated
2950 * over the entire buffer. Doing just one throw away read
2951 * initially seems to "solve" it.
2953 drm_dp_dpcd_read(aux
, DP_DPCD_REV
, buffer
, 1);
2955 for (i
= 0; i
< 3; i
++) {
2956 ret
= drm_dp_dpcd_read(aux
, offset
, buffer
, size
);
2966 * Fetch AUX CH registers 0x202 - 0x207 which contain
2967 * link status information
2970 intel_dp_get_link_status(struct intel_dp
*intel_dp
, uint8_t link_status
[DP_LINK_STATUS_SIZE
])
2972 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
2975 DP_LINK_STATUS_SIZE
) == DP_LINK_STATUS_SIZE
;
2978 /* These are source-specific values. */
2980 intel_dp_voltage_max(struct intel_dp
*intel_dp
)
2982 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2983 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2984 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2986 if (IS_BROXTON(dev
))
2987 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2988 else if (INTEL_INFO(dev
)->gen
>= 9) {
2989 if (dev_priv
->edp_low_vswing
&& port
== PORT_A
)
2990 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2991 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
2992 } else if (IS_VALLEYVIEW(dev
))
2993 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2994 else if (IS_GEN7(dev
) && port
== PORT_A
)
2995 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
2996 else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
)
2997 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2999 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3003 intel_dp_pre_emphasis_max(struct intel_dp
*intel_dp
, uint8_t voltage_swing
)
3005 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3006 enum port port
= dp_to_dig_port(intel_dp
)->port
;
3008 if (INTEL_INFO(dev
)->gen
>= 9) {
3009 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3011 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3013 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3014 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3015 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3017 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3019 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3021 } else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
)) {
3022 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3024 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3026 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3028 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3031 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3033 } else if (IS_VALLEYVIEW(dev
)) {
3034 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3036 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3038 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3040 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3043 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3045 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3046 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3047 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3048 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3051 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3053 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3056 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3058 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3062 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3065 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3070 static uint32_t vlv_signal_levels(struct intel_dp
*intel_dp
)
3072 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3073 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3074 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3075 struct intel_crtc
*intel_crtc
=
3076 to_intel_crtc(dport
->base
.base
.crtc
);
3077 unsigned long demph_reg_value
, preemph_reg_value
,
3078 uniqtranscale_reg_value
;
3079 uint8_t train_set
= intel_dp
->train_set
[0];
3080 enum dpio_channel port
= vlv_dport_to_channel(dport
);
3081 int pipe
= intel_crtc
->pipe
;
3083 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3084 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3085 preemph_reg_value
= 0x0004000;
3086 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3088 demph_reg_value
= 0x2B405555;
3089 uniqtranscale_reg_value
= 0x552AB83A;
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3092 demph_reg_value
= 0x2B404040;
3093 uniqtranscale_reg_value
= 0x5548B83A;
3095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3096 demph_reg_value
= 0x2B245555;
3097 uniqtranscale_reg_value
= 0x5560B83A;
3099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3100 demph_reg_value
= 0x2B405555;
3101 uniqtranscale_reg_value
= 0x5598DA3A;
3107 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3108 preemph_reg_value
= 0x0002000;
3109 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3111 demph_reg_value
= 0x2B404040;
3112 uniqtranscale_reg_value
= 0x5552B83A;
3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3115 demph_reg_value
= 0x2B404848;
3116 uniqtranscale_reg_value
= 0x5580B83A;
3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3119 demph_reg_value
= 0x2B404040;
3120 uniqtranscale_reg_value
= 0x55ADDA3A;
3126 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3127 preemph_reg_value
= 0x0000000;
3128 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3130 demph_reg_value
= 0x2B305555;
3131 uniqtranscale_reg_value
= 0x5570B83A;
3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3134 demph_reg_value
= 0x2B2B4040;
3135 uniqtranscale_reg_value
= 0x55ADDA3A;
3141 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3142 preemph_reg_value
= 0x0006000;
3143 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3145 demph_reg_value
= 0x1B405555;
3146 uniqtranscale_reg_value
= 0x55ADDA3A;
3156 mutex_lock(&dev_priv
->sb_lock
);
3157 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x00000000);
3158 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW4(port
), demph_reg_value
);
3159 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW2(port
),
3160 uniqtranscale_reg_value
);
3161 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW3(port
), 0x0C782040);
3162 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW11(port
), 0x00030000);
3163 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW9(port
), preemph_reg_value
);
3164 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x80000000);
3165 mutex_unlock(&dev_priv
->sb_lock
);
3170 static uint32_t chv_signal_levels(struct intel_dp
*intel_dp
)
3172 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3173 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3174 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3175 struct intel_crtc
*intel_crtc
= to_intel_crtc(dport
->base
.base
.crtc
);
3176 u32 deemph_reg_value
, margin_reg_value
, val
;
3177 uint8_t train_set
= intel_dp
->train_set
[0];
3178 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3179 enum pipe pipe
= intel_crtc
->pipe
;
3182 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3183 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3184 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3185 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3186 deemph_reg_value
= 128;
3187 margin_reg_value
= 52;
3189 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3190 deemph_reg_value
= 128;
3191 margin_reg_value
= 77;
3193 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3194 deemph_reg_value
= 128;
3195 margin_reg_value
= 102;
3197 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3198 deemph_reg_value
= 128;
3199 margin_reg_value
= 154;
3200 /* FIXME extra to set for 1200 */
3206 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3207 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3208 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3209 deemph_reg_value
= 85;
3210 margin_reg_value
= 78;
3212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3213 deemph_reg_value
= 85;
3214 margin_reg_value
= 116;
3216 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3217 deemph_reg_value
= 85;
3218 margin_reg_value
= 154;
3224 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3225 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3227 deemph_reg_value
= 64;
3228 margin_reg_value
= 104;
3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3231 deemph_reg_value
= 64;
3232 margin_reg_value
= 154;
3238 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3239 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3241 deemph_reg_value
= 43;
3242 margin_reg_value
= 154;
3252 mutex_lock(&dev_priv
->sb_lock
);
3254 /* Clear calc init */
3255 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3256 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3257 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3258 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3259 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3261 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3262 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3263 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3264 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3265 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3267 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW9(ch
));
3268 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3269 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3270 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW9(ch
), val
);
3272 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW9(ch
));
3273 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3274 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3275 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW9(ch
), val
);
3277 /* Program swing deemph */
3278 for (i
= 0; i
< 4; i
++) {
3279 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
));
3280 val
&= ~DPIO_SWING_DEEMPH9P5_MASK
;
3281 val
|= deemph_reg_value
<< DPIO_SWING_DEEMPH9P5_SHIFT
;
3282 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
), val
);
3285 /* Program swing margin */
3286 for (i
= 0; i
< 4; i
++) {
3287 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3288 val
&= ~DPIO_SWING_MARGIN000_MASK
;
3289 val
|= margin_reg_value
<< DPIO_SWING_MARGIN000_SHIFT
;
3290 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3293 /* Disable unique transition scale */
3294 for (i
= 0; i
< 4; i
++) {
3295 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3296 val
&= ~DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3297 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3300 if (((train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
)
3301 == DP_TRAIN_PRE_EMPH_LEVEL_0
) &&
3302 ((train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
)
3303 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3
)) {
3306 * The document said it needs to set bit 27 for ch0 and bit 26
3307 * for ch1. Might be a typo in the doc.
3308 * For now, for this unique transition scale selection, set bit
3309 * 27 for ch0 and ch1.
3311 for (i
= 0; i
< 4; i
++) {
3312 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3313 val
|= DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3314 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3317 for (i
= 0; i
< 4; i
++) {
3318 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3319 val
&= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3320 val
|= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3321 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3325 /* Start swing calculation */
3326 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3327 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3328 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3330 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3331 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3332 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3335 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW30
);
3336 val
|= DPIO_LRC_BYPASS
;
3337 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW30
, val
);
3339 mutex_unlock(&dev_priv
->sb_lock
);
3345 intel_get_adjust_train(struct intel_dp
*intel_dp
,
3346 const uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3351 uint8_t voltage_max
;
3352 uint8_t preemph_max
;
3354 for (lane
= 0; lane
< intel_dp
->lane_count
; lane
++) {
3355 uint8_t this_v
= drm_dp_get_adjust_request_voltage(link_status
, lane
);
3356 uint8_t this_p
= drm_dp_get_adjust_request_pre_emphasis(link_status
, lane
);
3364 voltage_max
= intel_dp_voltage_max(intel_dp
);
3365 if (v
>= voltage_max
)
3366 v
= voltage_max
| DP_TRAIN_MAX_SWING_REACHED
;
3368 preemph_max
= intel_dp_pre_emphasis_max(intel_dp
, v
);
3369 if (p
>= preemph_max
)
3370 p
= preemph_max
| DP_TRAIN_MAX_PRE_EMPHASIS_REACHED
;
3372 for (lane
= 0; lane
< 4; lane
++)
3373 intel_dp
->train_set
[lane
] = v
| p
;
3377 gen4_signal_levels(uint8_t train_set
)
3379 uint32_t signal_levels
= 0;
3381 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3384 signal_levels
|= DP_VOLTAGE_0_4
;
3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3387 signal_levels
|= DP_VOLTAGE_0_6
;
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3390 signal_levels
|= DP_VOLTAGE_0_8
;
3392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3393 signal_levels
|= DP_VOLTAGE_1_2
;
3396 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3397 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3399 signal_levels
|= DP_PRE_EMPHASIS_0
;
3401 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3402 signal_levels
|= DP_PRE_EMPHASIS_3_5
;
3404 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3405 signal_levels
|= DP_PRE_EMPHASIS_6
;
3407 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3408 signal_levels
|= DP_PRE_EMPHASIS_9_5
;
3411 return signal_levels
;
3414 /* Gen6's DP voltage swing and pre-emphasis control */
3416 gen6_edp_signal_levels(uint8_t train_set
)
3418 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3419 DP_TRAIN_PRE_EMPHASIS_MASK
);
3420 switch (signal_levels
) {
3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3423 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3425 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3428 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3431 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3434 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
3436 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3437 "0x%x\n", signal_levels
);
3438 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3442 /* Gen7's DP voltage swing and pre-emphasis control */
3444 gen7_edp_signal_levels(uint8_t train_set
)
3446 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3447 DP_TRAIN_PRE_EMPHASIS_MASK
);
3448 switch (signal_levels
) {
3449 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3450 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3452 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3454 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3457 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3459 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3462 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3464 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
3467 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3468 "0x%x\n", signal_levels
);
3469 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3473 /* Properly updates "DP" with the correct signal levels. */
3475 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
, uint32_t *DP
)
3477 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3478 enum port port
= intel_dig_port
->port
;
3479 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3480 uint32_t signal_levels
, mask
= 0;
3481 uint8_t train_set
= intel_dp
->train_set
[0];
3484 signal_levels
= ddi_signal_levels(intel_dp
);
3486 if (IS_BROXTON(dev
))
3489 mask
= DDI_BUF_EMP_MASK
;
3490 } else if (IS_CHERRYVIEW(dev
)) {
3491 signal_levels
= chv_signal_levels(intel_dp
);
3492 } else if (IS_VALLEYVIEW(dev
)) {
3493 signal_levels
= vlv_signal_levels(intel_dp
);
3494 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3495 signal_levels
= gen7_edp_signal_levels(train_set
);
3496 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3497 } else if (IS_GEN6(dev
) && port
== PORT_A
) {
3498 signal_levels
= gen6_edp_signal_levels(train_set
);
3499 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3501 signal_levels
= gen4_signal_levels(train_set
);
3502 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3506 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3508 DRM_DEBUG_KMS("Using vswing level %d\n",
3509 train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
);
3510 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3511 (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) >>
3512 DP_TRAIN_PRE_EMPHASIS_SHIFT
);
3514 *DP
= (*DP
& ~mask
) | signal_levels
;
3518 intel_dp_set_link_train(struct intel_dp
*intel_dp
,
3520 uint8_t dp_train_pat
)
3522 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3523 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3524 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3525 uint8_t buf
[sizeof(intel_dp
->train_set
) + 1];
3528 _intel_dp_set_link_train(intel_dp
, DP
, dp_train_pat
);
3530 I915_WRITE(intel_dp
->output_reg
, *DP
);
3531 POSTING_READ(intel_dp
->output_reg
);
3533 buf
[0] = dp_train_pat
;
3534 if ((dp_train_pat
& DP_TRAINING_PATTERN_MASK
) ==
3535 DP_TRAINING_PATTERN_DISABLE
) {
3536 /* don't write DP_TRAINING_LANEx_SET on disable */
3539 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3540 memcpy(buf
+ 1, intel_dp
->train_set
, intel_dp
->lane_count
);
3541 len
= intel_dp
->lane_count
+ 1;
3544 ret
= drm_dp_dpcd_write(&intel_dp
->aux
, DP_TRAINING_PATTERN_SET
,
3551 intel_dp_reset_link_train(struct intel_dp
*intel_dp
, uint32_t *DP
,
3552 uint8_t dp_train_pat
)
3554 if (!intel_dp
->train_set_valid
)
3555 memset(intel_dp
->train_set
, 0, sizeof(intel_dp
->train_set
));
3556 intel_dp_set_signal_levels(intel_dp
, DP
);
3557 return intel_dp_set_link_train(intel_dp
, DP
, dp_train_pat
);
3561 intel_dp_update_link_train(struct intel_dp
*intel_dp
, uint32_t *DP
,
3562 const uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3564 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3565 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3566 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3569 intel_get_adjust_train(intel_dp
, link_status
);
3570 intel_dp_set_signal_levels(intel_dp
, DP
);
3572 I915_WRITE(intel_dp
->output_reg
, *DP
);
3573 POSTING_READ(intel_dp
->output_reg
);
3575 ret
= drm_dp_dpcd_write(&intel_dp
->aux
, DP_TRAINING_LANE0_SET
,
3576 intel_dp
->train_set
, intel_dp
->lane_count
);
3578 return ret
== intel_dp
->lane_count
;
3581 static void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3583 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3584 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3585 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3586 enum port port
= intel_dig_port
->port
;
3592 val
= I915_READ(DP_TP_CTL(port
));
3593 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3594 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3595 I915_WRITE(DP_TP_CTL(port
), val
);
3598 * On PORT_A we can have only eDP in SST mode. There the only reason
3599 * we need to set idle transmission mode is to work around a HW issue
3600 * where we enable the pipe while not in idle link-training mode.
3601 * In this case there is requirement to wait for a minimum number of
3602 * idle patterns to be sent.
3607 if (wait_for((I915_READ(DP_TP_STATUS(port
)) & DP_TP_STATUS_IDLE_DONE
),
3609 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3612 /* Enable corresponding port and start training pattern 1 */
3614 intel_dp_start_link_train(struct intel_dp
*intel_dp
)
3616 struct drm_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
.base
;
3617 struct drm_device
*dev
= encoder
->dev
;
3620 int voltage_tries
, loop_tries
;
3621 uint32_t DP
= intel_dp
->DP
;
3622 uint8_t link_config
[2];
3625 intel_ddi_prepare_link_retrain(encoder
);
3627 /* Write the link configuration data */
3628 link_config
[0] = intel_dp
->link_bw
;
3629 link_config
[1] = intel_dp
->lane_count
;
3630 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
3631 link_config
[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN
;
3632 drm_dp_dpcd_write(&intel_dp
->aux
, DP_LINK_BW_SET
, link_config
, 2);
3633 if (intel_dp
->num_sink_rates
)
3634 drm_dp_dpcd_write(&intel_dp
->aux
, DP_LINK_RATE_SET
,
3635 &intel_dp
->rate_select
, 1);
3638 link_config
[1] = DP_SET_ANSI_8B10B
;
3639 drm_dp_dpcd_write(&intel_dp
->aux
, DP_DOWNSPREAD_CTRL
, link_config
, 2);
3643 /* clock recovery */
3644 if (!intel_dp_reset_link_train(intel_dp
, &DP
,
3645 DP_TRAINING_PATTERN_1
|
3646 DP_LINK_SCRAMBLING_DISABLE
)) {
3647 DRM_ERROR("failed to enable link training\n");
3655 uint8_t link_status
[DP_LINK_STATUS_SIZE
];
3657 drm_dp_link_train_clock_recovery_delay(intel_dp
->dpcd
);
3658 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
3659 DRM_ERROR("failed to get link status\n");
3663 if (drm_dp_clock_recovery_ok(link_status
, intel_dp
->lane_count
)) {
3664 DRM_DEBUG_KMS("clock recovery OK\n");
3669 * if we used previously trained voltage and pre-emphasis values
3670 * and we don't get clock recovery, reset link training values
3672 if (intel_dp
->train_set_valid
) {
3673 DRM_DEBUG_KMS("clock recovery not ok, reset");
3674 /* clear the flag as we are not reusing train set */
3675 intel_dp
->train_set_valid
= false;
3676 if (!intel_dp_reset_link_train(intel_dp
, &DP
,
3677 DP_TRAINING_PATTERN_1
|
3678 DP_LINK_SCRAMBLING_DISABLE
)) {
3679 DRM_ERROR("failed to enable link training\n");
3685 /* Check to see if we've tried the max voltage */
3686 for (i
= 0; i
< intel_dp
->lane_count
; i
++)
3687 if ((intel_dp
->train_set
[i
] & DP_TRAIN_MAX_SWING_REACHED
) == 0)
3689 if (i
== intel_dp
->lane_count
) {
3691 if (loop_tries
== 5) {
3692 DRM_ERROR("too many full retries, give up\n");
3695 intel_dp_reset_link_train(intel_dp
, &DP
,
3696 DP_TRAINING_PATTERN_1
|
3697 DP_LINK_SCRAMBLING_DISABLE
);
3702 /* Check to see if we've tried the same voltage 5 times */
3703 if ((intel_dp
->train_set
[0] & DP_TRAIN_VOLTAGE_SWING_MASK
) == voltage
) {
3705 if (voltage_tries
== 5) {
3706 DRM_ERROR("too many voltage retries, give up\n");
3711 voltage
= intel_dp
->train_set
[0] & DP_TRAIN_VOLTAGE_SWING_MASK
;
3713 /* Update training set as requested by target */
3714 if (!intel_dp_update_link_train(intel_dp
, &DP
, link_status
)) {
3715 DRM_ERROR("failed to update link training\n");
3724 intel_dp_complete_link_train(struct intel_dp
*intel_dp
)
3726 bool channel_eq
= false;
3727 int tries
, cr_tries
;
3728 uint32_t DP
= intel_dp
->DP
;
3729 uint32_t training_pattern
= DP_TRAINING_PATTERN_2
;
3731 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3732 if (intel_dp
->link_bw
== DP_LINK_BW_5_4
|| intel_dp
->use_tps3
)
3733 training_pattern
= DP_TRAINING_PATTERN_3
;
3735 /* channel equalization */
3736 if (!intel_dp_set_link_train(intel_dp
, &DP
,
3738 DP_LINK_SCRAMBLING_DISABLE
)) {
3739 DRM_ERROR("failed to start channel equalization\n");
3747 uint8_t link_status
[DP_LINK_STATUS_SIZE
];
3750 DRM_ERROR("failed to train DP, aborting\n");
3754 drm_dp_link_train_channel_eq_delay(intel_dp
->dpcd
);
3755 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
3756 DRM_ERROR("failed to get link status\n");
3760 /* Make sure clock is still ok */
3761 if (!drm_dp_clock_recovery_ok(link_status
, intel_dp
->lane_count
)) {
3762 intel_dp
->train_set_valid
= false;
3763 intel_dp_start_link_train(intel_dp
);
3764 intel_dp_set_link_train(intel_dp
, &DP
,
3766 DP_LINK_SCRAMBLING_DISABLE
);
3771 if (drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
)) {
3776 /* Try 5 times, then try clock recovery if that fails */
3778 intel_dp
->train_set_valid
= false;
3779 intel_dp_start_link_train(intel_dp
);
3780 intel_dp_set_link_train(intel_dp
, &DP
,
3782 DP_LINK_SCRAMBLING_DISABLE
);
3788 /* Update training set as requested by target */
3789 if (!intel_dp_update_link_train(intel_dp
, &DP
, link_status
)) {
3790 DRM_ERROR("failed to update link training\n");
3796 intel_dp_set_idle_link_train(intel_dp
);
3801 intel_dp
->train_set_valid
= true;
3802 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3806 void intel_dp_stop_link_train(struct intel_dp
*intel_dp
)
3808 intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
3809 DP_TRAINING_PATTERN_DISABLE
);
3813 intel_dp_link_down(struct intel_dp
*intel_dp
)
3815 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3816 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
3817 enum port port
= intel_dig_port
->port
;
3818 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3819 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3820 uint32_t DP
= intel_dp
->DP
;
3822 if (WARN_ON(HAS_DDI(dev
)))
3825 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3828 DRM_DEBUG_KMS("\n");
3830 if ((IS_GEN7(dev
) && port
== PORT_A
) ||
3831 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
3832 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3833 DP
|= DP_LINK_TRAIN_PAT_IDLE_CPT
;
3835 if (IS_CHERRYVIEW(dev
))
3836 DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
3838 DP
&= ~DP_LINK_TRAIN_MASK
;
3839 DP
|= DP_LINK_TRAIN_PAT_IDLE
;
3841 I915_WRITE(intel_dp
->output_reg
, DP
);
3842 POSTING_READ(intel_dp
->output_reg
);
3844 DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
3845 I915_WRITE(intel_dp
->output_reg
, DP
);
3846 POSTING_READ(intel_dp
->output_reg
);
3849 * HW workaround for IBX, we need to move the port
3850 * to transcoder A after disabling it to allow the
3851 * matching HDMI port to be enabled on transcoder A.
3853 if (HAS_PCH_IBX(dev
) && crtc
->pipe
== PIPE_B
&& port
!= PORT_A
) {
3854 /* always enable with pattern 1 (as per spec) */
3855 DP
&= ~(DP_PIPEB_SELECT
| DP_LINK_TRAIN_MASK
);
3856 DP
|= DP_PORT_EN
| DP_LINK_TRAIN_PAT_1
;
3857 I915_WRITE(intel_dp
->output_reg
, DP
);
3858 POSTING_READ(intel_dp
->output_reg
);
3861 I915_WRITE(intel_dp
->output_reg
, DP
);
3862 POSTING_READ(intel_dp
->output_reg
);
3865 msleep(intel_dp
->panel_power_down_delay
);
3869 intel_dp_get_dpcd(struct intel_dp
*intel_dp
)
3871 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3872 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3873 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3876 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, 0x000, intel_dp
->dpcd
,
3877 sizeof(intel_dp
->dpcd
)) < 0)
3878 return false; /* aux transfer failed */
3880 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp
->dpcd
), intel_dp
->dpcd
);
3882 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0)
3883 return false; /* DPCD not present */
3885 /* Check if the panel supports PSR */
3886 memset(intel_dp
->psr_dpcd
, 0, sizeof(intel_dp
->psr_dpcd
));
3887 if (is_edp(intel_dp
)) {
3888 intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_PSR_SUPPORT
,
3890 sizeof(intel_dp
->psr_dpcd
));
3891 if (intel_dp
->psr_dpcd
[0] & DP_PSR_IS_SUPPORTED
) {
3892 dev_priv
->psr
.sink_support
= true;
3893 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3896 if (INTEL_INFO(dev
)->gen
>= 9 &&
3897 (intel_dp
->psr_dpcd
[0] & DP_PSR2_IS_SUPPORTED
)) {
3898 uint8_t frame_sync_cap
;
3900 dev_priv
->psr
.sink_support
= true;
3901 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3902 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP
,
3903 &frame_sync_cap
, 1);
3904 dev_priv
->psr
.aux_frame_sync
= frame_sync_cap
? true : false;
3905 /* PSR2 needs frame sync as well */
3906 dev_priv
->psr
.psr2_support
= dev_priv
->psr
.aux_frame_sync
;
3907 DRM_DEBUG_KMS("PSR2 %s on sink",
3908 dev_priv
->psr
.psr2_support
? "supported" : "not supported");
3912 /* Training Pattern 3 support, Intel platforms that support HBR2 alone
3913 * have support for TP3 hence that check is used along with dpcd check
3914 * to ensure TP3 can be enabled.
3915 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
3916 * supported but still not enabled.
3918 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x12 &&
3919 intel_dp
->dpcd
[DP_MAX_LANE_COUNT
] & DP_TPS3_SUPPORTED
&&
3920 intel_dp_source_supports_hbr2(dev
)) {
3921 intel_dp
->use_tps3
= true;
3922 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3924 intel_dp
->use_tps3
= false;
3926 /* Intermediate frequency support */
3927 if (is_edp(intel_dp
) &&
3928 (intel_dp
->dpcd
[DP_EDP_CONFIGURATION_CAP
] & DP_DPCD_DISPLAY_CONTROL_CAPABLE
) &&
3929 (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_EDP_DPCD_REV
, &rev
, 1) == 1) &&
3930 (rev
>= 0x03)) { /* eDp v1.4 or higher */
3931 __le16 sink_rates
[DP_MAX_SUPPORTED_RATES
];
3934 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3935 DP_SUPPORTED_LINK_RATES
,
3937 sizeof(sink_rates
));
3939 for (i
= 0; i
< ARRAY_SIZE(sink_rates
); i
++) {
3940 int val
= le16_to_cpu(sink_rates
[i
]);
3945 /* Value read is in kHz while drm clock is saved in deca-kHz */
3946 intel_dp
->sink_rates
[i
] = (val
* 200) / 10;
3948 intel_dp
->num_sink_rates
= i
;
3951 intel_dp_print_rates(intel_dp
);
3953 if (!(intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
3954 DP_DWN_STRM_PORT_PRESENT
))
3955 return true; /* native DP sink */
3957 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0x10)
3958 return true; /* no per-port downstream info */
3960 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_DOWNSTREAM_PORT_0
,
3961 intel_dp
->downstream_ports
,
3962 DP_MAX_DOWNSTREAM_PORTS
) < 0)
3963 return false; /* downstream port status fetch failed */
3969 intel_dp_probe_oui(struct intel_dp
*intel_dp
)
3973 if (!(intel_dp
->dpcd
[DP_DOWN_STREAM_PORT_COUNT
] & DP_OUI_SUPPORT
))
3976 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_OUI
, buf
, 3) == 3)
3977 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3978 buf
[0], buf
[1], buf
[2]);
3980 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_BRANCH_OUI
, buf
, 3) == 3)
3981 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3982 buf
[0], buf
[1], buf
[2]);
3986 intel_dp_probe_mst(struct intel_dp
*intel_dp
)
3990 if (!intel_dp
->can_mst
)
3993 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x12)
3996 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_MSTM_CAP
, buf
, 1)) {
3997 if (buf
[0] & DP_MST_CAP
) {
3998 DRM_DEBUG_KMS("Sink is MST capable\n");
3999 intel_dp
->is_mst
= true;
4001 DRM_DEBUG_KMS("Sink is not MST capable\n");
4002 intel_dp
->is_mst
= false;
4006 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4007 return intel_dp
->is_mst
;
4010 static void intel_dp_sink_crc_stop(struct intel_dp
*intel_dp
)
4012 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4013 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4016 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0) {
4017 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4021 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4022 buf
& ~DP_TEST_SINK_START
) < 0)
4023 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4025 hsw_enable_ips(intel_crtc
);
4028 static int intel_dp_sink_crc_start(struct intel_dp
*intel_dp
)
4030 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4031 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4034 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
4037 if (!(buf
& DP_TEST_CRC_SUPPORTED
))
4040 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
4043 hsw_disable_ips(intel_crtc
);
4045 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4046 buf
| DP_TEST_SINK_START
) < 0) {
4047 hsw_enable_ips(intel_crtc
);
4054 int intel_dp_sink_crc(struct intel_dp
*intel_dp
, u8
*crc
)
4056 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4057 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4058 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4064 ret
= intel_dp_sink_crc_start(intel_dp
);
4068 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0) {
4073 test_crc_count
= buf
& DP_TEST_COUNT_MASK
;
4076 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
4077 DP_TEST_SINK_MISC
, &buf
) < 0) {
4081 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4082 } while (--attempts
&& (buf
& DP_TEST_COUNT_MASK
) == test_crc_count
);
4084 if (attempts
== 0) {
4085 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4090 if (drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_CRC_R_CR
, crc
, 6) < 0)
4093 intel_dp_sink_crc_stop(intel_dp
);
4098 intel_dp_get_sink_irq(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4100 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4101 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4102 sink_irq_vector
, 1) == 1;
4106 intel_dp_get_sink_irq_esi(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4110 ret
= intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4112 sink_irq_vector
, 14);
4119 static uint8_t intel_dp_autotest_link_training(struct intel_dp
*intel_dp
)
4121 uint8_t test_result
= DP_TEST_ACK
;
4125 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp
*intel_dp
)
4127 uint8_t test_result
= DP_TEST_NAK
;
4131 static uint8_t intel_dp_autotest_edid(struct intel_dp
*intel_dp
)
4133 uint8_t test_result
= DP_TEST_NAK
;
4134 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4135 struct drm_connector
*connector
= &intel_connector
->base
;
4137 if (intel_connector
->detect_edid
== NULL
||
4138 connector
->edid_corrupt
||
4139 intel_dp
->aux
.i2c_defer_count
> 6) {
4140 /* Check EDID read for NACKs, DEFERs and corruption
4141 * (DP CTS 1.2 Core r1.1)
4142 * 4.2.2.4 : Failed EDID read, I2C_NAK
4143 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4144 * 4.2.2.6 : EDID corruption detected
4145 * Use failsafe mode for all cases
4147 if (intel_dp
->aux
.i2c_nack_count
> 0 ||
4148 intel_dp
->aux
.i2c_defer_count
> 0)
4149 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4150 intel_dp
->aux
.i2c_nack_count
,
4151 intel_dp
->aux
.i2c_defer_count
);
4152 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_FAILSAFE
;
4154 struct edid
*block
= intel_connector
->detect_edid
;
4156 /* We have to write the checksum
4157 * of the last block read
4159 block
+= intel_connector
->detect_edid
->extensions
;
4161 if (!drm_dp_dpcd_write(&intel_dp
->aux
,
4162 DP_TEST_EDID_CHECKSUM
,
4165 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4167 test_result
= DP_TEST_ACK
| DP_TEST_EDID_CHECKSUM_WRITE
;
4168 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_STANDARD
;
4171 /* Set test active flag here so userspace doesn't interrupt things */
4172 intel_dp
->compliance_test_active
= 1;
4177 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp
*intel_dp
)
4179 uint8_t test_result
= DP_TEST_NAK
;
4183 static void intel_dp_handle_test_request(struct intel_dp
*intel_dp
)
4185 uint8_t response
= DP_TEST_NAK
;
4189 intel_dp
->compliance_test_active
= 0;
4190 intel_dp
->compliance_test_type
= 0;
4191 intel_dp
->compliance_test_data
= 0;
4193 intel_dp
->aux
.i2c_nack_count
= 0;
4194 intel_dp
->aux
.i2c_defer_count
= 0;
4196 status
= drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_REQUEST
, &rxdata
, 1);
4198 DRM_DEBUG_KMS("Could not read test request from sink\n");
4203 case DP_TEST_LINK_TRAINING
:
4204 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4205 intel_dp
->compliance_test_type
= DP_TEST_LINK_TRAINING
;
4206 response
= intel_dp_autotest_link_training(intel_dp
);
4208 case DP_TEST_LINK_VIDEO_PATTERN
:
4209 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4210 intel_dp
->compliance_test_type
= DP_TEST_LINK_VIDEO_PATTERN
;
4211 response
= intel_dp_autotest_video_pattern(intel_dp
);
4213 case DP_TEST_LINK_EDID_READ
:
4214 DRM_DEBUG_KMS("EDID test requested\n");
4215 intel_dp
->compliance_test_type
= DP_TEST_LINK_EDID_READ
;
4216 response
= intel_dp_autotest_edid(intel_dp
);
4218 case DP_TEST_LINK_PHY_TEST_PATTERN
:
4219 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4220 intel_dp
->compliance_test_type
= DP_TEST_LINK_PHY_TEST_PATTERN
;
4221 response
= intel_dp_autotest_phy_pattern(intel_dp
);
4224 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata
);
4229 status
= drm_dp_dpcd_write(&intel_dp
->aux
,
4233 DRM_DEBUG_KMS("Could not write test response to sink\n");
4237 intel_dp_check_mst_status(struct intel_dp
*intel_dp
)
4241 if (intel_dp
->is_mst
) {
4246 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4250 /* check link status - esi[10] = 0x200c */
4251 if (intel_dp
->active_mst_links
&& !drm_dp_channel_eq_ok(&esi
[10], intel_dp
->lane_count
)) {
4252 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4253 intel_dp_start_link_train(intel_dp
);
4254 intel_dp_complete_link_train(intel_dp
);
4255 intel_dp_stop_link_train(intel_dp
);
4258 DRM_DEBUG_KMS("got esi %3ph\n", esi
);
4259 ret
= drm_dp_mst_hpd_irq(&intel_dp
->mst_mgr
, esi
, &handled
);
4262 for (retry
= 0; retry
< 3; retry
++) {
4264 wret
= drm_dp_dpcd_write(&intel_dp
->aux
,
4265 DP_SINK_COUNT_ESI
+1,
4272 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4274 DRM_DEBUG_KMS("got esi2 %3ph\n", esi
);
4282 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4283 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4284 intel_dp
->is_mst
= false;
4285 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4286 /* send a hotplug event */
4287 drm_kms_helper_hotplug_event(intel_dig_port
->base
.base
.dev
);
4294 * According to DP spec
4297 * 2. Configure link according to Receiver Capabilities
4298 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4299 * 4. Check link status on receipt of hot-plug interrupt
4302 intel_dp_check_link_status(struct intel_dp
*intel_dp
)
4304 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4305 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4307 u8 link_status
[DP_LINK_STATUS_SIZE
];
4309 WARN_ON(!drm_modeset_is_locked(&dev
->mode_config
.connection_mutex
));
4311 if (!intel_encoder
->base
.crtc
)
4314 if (!to_intel_crtc(intel_encoder
->base
.crtc
)->active
)
4317 /* Try to read receiver status if the link appears to be up */
4318 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
4322 /* Now read the DPCD to see if it's actually running */
4323 if (!intel_dp_get_dpcd(intel_dp
)) {
4327 /* Try to read the source of the interrupt */
4328 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4329 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4330 /* Clear interrupt source */
4331 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4332 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4335 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4336 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4337 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4338 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4341 if (!drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
)) {
4342 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4343 intel_encoder
->base
.name
);
4344 intel_dp_start_link_train(intel_dp
);
4345 intel_dp_complete_link_train(intel_dp
);
4346 intel_dp_stop_link_train(intel_dp
);
4350 /* XXX this is probably wrong for multiple downstream ports */
4351 static enum drm_connector_status
4352 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4354 uint8_t *dpcd
= intel_dp
->dpcd
;
4357 if (!intel_dp_get_dpcd(intel_dp
))
4358 return connector_status_disconnected
;
4360 /* if there's no downstream port, we're done */
4361 if (!(dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
))
4362 return connector_status_connected
;
4364 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4365 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4366 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4369 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_COUNT
,
4371 return connector_status_unknown
;
4373 return DP_GET_SINK_COUNT(reg
) ? connector_status_connected
4374 : connector_status_disconnected
;
4377 /* If no HPD, poke DDC gently */
4378 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4379 return connector_status_connected
;
4381 /* Well we tried, say unknown for unreliable port types */
4382 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4383 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4384 if (type
== DP_DS_PORT_TYPE_VGA
||
4385 type
== DP_DS_PORT_TYPE_NON_EDID
)
4386 return connector_status_unknown
;
4388 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4389 DP_DWN_STRM_PORT_TYPE_MASK
;
4390 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4391 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4392 return connector_status_unknown
;
4395 /* Anything else is out of spec, warn and ignore */
4396 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4397 return connector_status_disconnected
;
4400 static enum drm_connector_status
4401 edp_detect(struct intel_dp
*intel_dp
)
4403 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4404 enum drm_connector_status status
;
4406 status
= intel_panel_detect(dev
);
4407 if (status
== connector_status_unknown
)
4408 status
= connector_status_connected
;
4413 static enum drm_connector_status
4414 ironlake_dp_detect(struct intel_dp
*intel_dp
)
4416 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4417 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4418 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4420 if (!ibx_digital_port_connected(dev_priv
, intel_dig_port
))
4421 return connector_status_disconnected
;
4423 return intel_dp_detect_dpcd(intel_dp
);
4426 static int g4x_digital_port_connected(struct drm_device
*dev
,
4427 struct intel_digital_port
*intel_dig_port
)
4429 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4432 if (IS_VALLEYVIEW(dev
)) {
4433 switch (intel_dig_port
->port
) {
4435 bit
= PORTB_HOTPLUG_LIVE_STATUS_VLV
;
4438 bit
= PORTC_HOTPLUG_LIVE_STATUS_VLV
;
4441 bit
= PORTD_HOTPLUG_LIVE_STATUS_VLV
;
4447 switch (intel_dig_port
->port
) {
4449 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4452 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4455 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4462 if ((I915_READ(PORT_HOTPLUG_STAT
) & bit
) == 0)
4467 static enum drm_connector_status
4468 g4x_dp_detect(struct intel_dp
*intel_dp
)
4470 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4471 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4474 /* Can't disconnect eDP, but you can close the lid... */
4475 if (is_edp(intel_dp
)) {
4476 enum drm_connector_status status
;
4478 status
= intel_panel_detect(dev
);
4479 if (status
== connector_status_unknown
)
4480 status
= connector_status_connected
;
4484 ret
= g4x_digital_port_connected(dev
, intel_dig_port
);
4486 return connector_status_unknown
;
4488 return connector_status_disconnected
;
4490 return intel_dp_detect_dpcd(intel_dp
);
4493 static struct edid
*
4494 intel_dp_get_edid(struct intel_dp
*intel_dp
)
4496 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4498 /* use cached edid if we have one */
4499 if (intel_connector
->edid
) {
4501 if (IS_ERR(intel_connector
->edid
))
4504 return drm_edid_duplicate(intel_connector
->edid
);
4506 return drm_get_edid(&intel_connector
->base
,
4507 &intel_dp
->aux
.ddc
);
4511 intel_dp_set_edid(struct intel_dp
*intel_dp
)
4513 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4516 edid
= intel_dp_get_edid(intel_dp
);
4517 intel_connector
->detect_edid
= edid
;
4519 if (intel_dp
->force_audio
!= HDMI_AUDIO_AUTO
)
4520 intel_dp
->has_audio
= intel_dp
->force_audio
== HDMI_AUDIO_ON
;
4522 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
4526 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
4528 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4530 kfree(intel_connector
->detect_edid
);
4531 intel_connector
->detect_edid
= NULL
;
4533 intel_dp
->has_audio
= false;
4536 static enum intel_display_power_domain
4537 intel_dp_power_get(struct intel_dp
*dp
)
4539 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4540 enum intel_display_power_domain power_domain
;
4542 power_domain
= intel_display_port_power_domain(encoder
);
4543 intel_display_power_get(to_i915(encoder
->base
.dev
), power_domain
);
4545 return power_domain
;
4549 intel_dp_power_put(struct intel_dp
*dp
,
4550 enum intel_display_power_domain power_domain
)
4552 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4553 intel_display_power_put(to_i915(encoder
->base
.dev
), power_domain
);
4556 static enum drm_connector_status
4557 intel_dp_detect(struct drm_connector
*connector
, bool force
)
4559 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4560 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4561 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4562 struct drm_device
*dev
= connector
->dev
;
4563 enum drm_connector_status status
;
4564 enum intel_display_power_domain power_domain
;
4568 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4569 connector
->base
.id
, connector
->name
);
4570 intel_dp_unset_edid(intel_dp
);
4572 if (intel_dp
->is_mst
) {
4573 /* MST devices are disconnected from a monitor POV */
4574 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4575 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4576 return connector_status_disconnected
;
4579 power_domain
= intel_dp_power_get(intel_dp
);
4581 /* Can't disconnect eDP, but you can close the lid... */
4582 if (is_edp(intel_dp
))
4583 status
= edp_detect(intel_dp
);
4584 else if (HAS_PCH_SPLIT(dev
))
4585 status
= ironlake_dp_detect(intel_dp
);
4587 status
= g4x_dp_detect(intel_dp
);
4588 if (status
!= connector_status_connected
)
4591 intel_dp_probe_oui(intel_dp
);
4593 ret
= intel_dp_probe_mst(intel_dp
);
4595 /* if we are in MST mode then this connector
4596 won't appear connected or have anything with EDID on it */
4597 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4598 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4599 status
= connector_status_disconnected
;
4603 intel_dp_set_edid(intel_dp
);
4605 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4606 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4607 status
= connector_status_connected
;
4609 /* Try to read the source of the interrupt */
4610 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4611 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4612 /* Clear interrupt source */
4613 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4614 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4617 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4618 intel_dp_handle_test_request(intel_dp
);
4619 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4620 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4624 intel_dp_power_put(intel_dp
, power_domain
);
4629 intel_dp_force(struct drm_connector
*connector
)
4631 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4632 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4633 enum intel_display_power_domain power_domain
;
4635 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4636 connector
->base
.id
, connector
->name
);
4637 intel_dp_unset_edid(intel_dp
);
4639 if (connector
->status
!= connector_status_connected
)
4642 power_domain
= intel_dp_power_get(intel_dp
);
4644 intel_dp_set_edid(intel_dp
);
4646 intel_dp_power_put(intel_dp
, power_domain
);
4648 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4649 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4652 static int intel_dp_get_modes(struct drm_connector
*connector
)
4654 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4657 edid
= intel_connector
->detect_edid
;
4659 int ret
= intel_connector_update_modes(connector
, edid
);
4664 /* if eDP has no EDID, fall back to fixed mode */
4665 if (is_edp(intel_attached_dp(connector
)) &&
4666 intel_connector
->panel
.fixed_mode
) {
4667 struct drm_display_mode
*mode
;
4669 mode
= drm_mode_duplicate(connector
->dev
,
4670 intel_connector
->panel
.fixed_mode
);
4672 drm_mode_probed_add(connector
, mode
);
4681 intel_dp_detect_audio(struct drm_connector
*connector
)
4683 bool has_audio
= false;
4686 edid
= to_intel_connector(connector
)->detect_edid
;
4688 has_audio
= drm_detect_monitor_audio(edid
);
4694 intel_dp_set_property(struct drm_connector
*connector
,
4695 struct drm_property
*property
,
4698 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
4699 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4700 struct intel_encoder
*intel_encoder
= intel_attached_encoder(connector
);
4701 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4704 ret
= drm_object_property_set_value(&connector
->base
, property
, val
);
4708 if (property
== dev_priv
->force_audio_property
) {
4712 if (i
== intel_dp
->force_audio
)
4715 intel_dp
->force_audio
= i
;
4717 if (i
== HDMI_AUDIO_AUTO
)
4718 has_audio
= intel_dp_detect_audio(connector
);
4720 has_audio
= (i
== HDMI_AUDIO_ON
);
4722 if (has_audio
== intel_dp
->has_audio
)
4725 intel_dp
->has_audio
= has_audio
;
4729 if (property
== dev_priv
->broadcast_rgb_property
) {
4730 bool old_auto
= intel_dp
->color_range_auto
;
4731 uint32_t old_range
= intel_dp
->color_range
;
4734 case INTEL_BROADCAST_RGB_AUTO
:
4735 intel_dp
->color_range_auto
= true;
4737 case INTEL_BROADCAST_RGB_FULL
:
4738 intel_dp
->color_range_auto
= false;
4739 intel_dp
->color_range
= 0;
4741 case INTEL_BROADCAST_RGB_LIMITED
:
4742 intel_dp
->color_range_auto
= false;
4743 intel_dp
->color_range
= DP_COLOR_RANGE_16_235
;
4749 if (old_auto
== intel_dp
->color_range_auto
&&
4750 old_range
== intel_dp
->color_range
)
4756 if (is_edp(intel_dp
) &&
4757 property
== connector
->dev
->mode_config
.scaling_mode_property
) {
4758 if (val
== DRM_MODE_SCALE_NONE
) {
4759 DRM_DEBUG_KMS("no scaling not supported\n");
4763 if (intel_connector
->panel
.fitting_mode
== val
) {
4764 /* the eDP scaling property is not changed */
4767 intel_connector
->panel
.fitting_mode
= val
;
4775 if (intel_encoder
->base
.crtc
)
4776 intel_crtc_restore_mode(intel_encoder
->base
.crtc
);
4782 intel_dp_connector_destroy(struct drm_connector
*connector
)
4784 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4786 kfree(intel_connector
->detect_edid
);
4788 if (!IS_ERR_OR_NULL(intel_connector
->edid
))
4789 kfree(intel_connector
->edid
);
4791 /* Can't call is_edp() since the encoder may have been destroyed
4793 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4794 intel_panel_fini(&intel_connector
->panel
);
4796 drm_connector_cleanup(connector
);
4800 void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
4802 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
4803 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4805 drm_dp_aux_unregister(&intel_dp
->aux
);
4806 intel_dp_mst_encoder_cleanup(intel_dig_port
);
4807 if (is_edp(intel_dp
)) {
4808 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4810 * vdd might still be enabled do to the delayed vdd off.
4811 * Make sure vdd is actually turned off here.
4814 edp_panel_vdd_off_sync(intel_dp
);
4815 pps_unlock(intel_dp
);
4817 if (intel_dp
->edp_notifier
.notifier_call
) {
4818 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
4819 intel_dp
->edp_notifier
.notifier_call
= NULL
;
4822 drm_encoder_cleanup(encoder
);
4823 kfree(intel_dig_port
);
4826 static void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
4828 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4830 if (!is_edp(intel_dp
))
4834 * vdd might still be enabled do to the delayed vdd off.
4835 * Make sure vdd is actually turned off here.
4837 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4839 edp_panel_vdd_off_sync(intel_dp
);
4840 pps_unlock(intel_dp
);
4843 static void intel_edp_panel_vdd_sanitize(struct intel_dp
*intel_dp
)
4845 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4846 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4847 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4848 enum intel_display_power_domain power_domain
;
4850 lockdep_assert_held(&dev_priv
->pps_mutex
);
4852 if (!edp_have_panel_vdd(intel_dp
))
4856 * The VDD bit needs a power domain reference, so if the bit is
4857 * already enabled when we boot or resume, grab this reference and
4858 * schedule a vdd off, so we don't hold on to the reference
4861 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4862 power_domain
= intel_display_port_power_domain(&intel_dig_port
->base
);
4863 intel_display_power_get(dev_priv
, power_domain
);
4865 edp_panel_vdd_schedule_off(intel_dp
);
4868 static void intel_dp_encoder_reset(struct drm_encoder
*encoder
)
4870 struct intel_dp
*intel_dp
;
4872 if (to_intel_encoder(encoder
)->type
!= INTEL_OUTPUT_EDP
)
4875 intel_dp
= enc_to_intel_dp(encoder
);
4880 * Read out the current power sequencer assignment,
4881 * in case the BIOS did something with it.
4883 if (IS_VALLEYVIEW(encoder
->dev
))
4884 vlv_initial_power_sequencer_setup(intel_dp
);
4886 intel_edp_panel_vdd_sanitize(intel_dp
);
4888 pps_unlock(intel_dp
);
4891 static const struct drm_connector_funcs intel_dp_connector_funcs
= {
4892 .dpms
= drm_atomic_helper_connector_dpms
,
4893 .detect
= intel_dp_detect
,
4894 .force
= intel_dp_force
,
4895 .fill_modes
= drm_helper_probe_single_connector_modes
,
4896 .set_property
= intel_dp_set_property
,
4897 .atomic_get_property
= intel_connector_atomic_get_property
,
4898 .destroy
= intel_dp_connector_destroy
,
4899 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4900 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
4903 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs
= {
4904 .get_modes
= intel_dp_get_modes
,
4905 .mode_valid
= intel_dp_mode_valid
,
4906 .best_encoder
= intel_best_encoder
,
4909 static const struct drm_encoder_funcs intel_dp_enc_funcs
= {
4910 .reset
= intel_dp_encoder_reset
,
4911 .destroy
= intel_dp_encoder_destroy
,
4915 intel_dp_hpd_pulse(struct intel_digital_port
*intel_dig_port
, bool long_hpd
)
4917 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4918 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4919 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4920 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4921 enum intel_display_power_domain power_domain
;
4922 enum irqreturn ret
= IRQ_NONE
;
4924 if (intel_dig_port
->base
.type
!= INTEL_OUTPUT_EDP
)
4925 intel_dig_port
->base
.type
= INTEL_OUTPUT_DISPLAYPORT
;
4927 if (long_hpd
&& intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
) {
4929 * vdd off can generate a long pulse on eDP which
4930 * would require vdd on to handle it, and thus we
4931 * would end up in an endless cycle of
4932 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4934 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4935 port_name(intel_dig_port
->port
));
4939 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4940 port_name(intel_dig_port
->port
),
4941 long_hpd
? "long" : "short");
4943 power_domain
= intel_display_port_power_domain(intel_encoder
);
4944 intel_display_power_get(dev_priv
, power_domain
);
4947 /* indicate that we need to restart link training */
4948 intel_dp
->train_set_valid
= false;
4950 if (HAS_PCH_SPLIT(dev
)) {
4951 if (!ibx_digital_port_connected(dev_priv
, intel_dig_port
))
4954 if (g4x_digital_port_connected(dev
, intel_dig_port
) != 1)
4958 if (!intel_dp_get_dpcd(intel_dp
)) {
4962 intel_dp_probe_oui(intel_dp
);
4964 if (!intel_dp_probe_mst(intel_dp
)) {
4965 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
4966 intel_dp_check_link_status(intel_dp
);
4967 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
4971 if (intel_dp
->is_mst
) {
4972 if (intel_dp_check_mst_status(intel_dp
) == -EINVAL
)
4976 if (!intel_dp
->is_mst
) {
4977 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
4978 intel_dp_check_link_status(intel_dp
);
4979 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
4987 /* if we were in MST mode, and device is not there get out of MST mode */
4988 if (intel_dp
->is_mst
) {
4989 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp
->is_mst
, intel_dp
->mst_mgr
.mst_state
);
4990 intel_dp
->is_mst
= false;
4991 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4994 intel_display_power_put(dev_priv
, power_domain
);
4999 /* Return which DP Port should be selected for Transcoder DP control */
5001 intel_trans_dp_port_sel(struct drm_crtc
*crtc
)
5003 struct drm_device
*dev
= crtc
->dev
;
5004 struct intel_encoder
*intel_encoder
;
5005 struct intel_dp
*intel_dp
;
5007 for_each_encoder_on_crtc(dev
, crtc
, intel_encoder
) {
5008 intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
5010 if (intel_encoder
->type
== INTEL_OUTPUT_DISPLAYPORT
||
5011 intel_encoder
->type
== INTEL_OUTPUT_EDP
)
5012 return intel_dp
->output_reg
;
5018 /* check the VBT to see whether the eDP is on another port */
5019 bool intel_dp_is_edp(struct drm_device
*dev
, enum port port
)
5021 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5022 union child_device_config
*p_child
;
5024 static const short port_mapping
[] = {
5025 [PORT_B
] = DVO_PORT_DPB
,
5026 [PORT_C
] = DVO_PORT_DPC
,
5027 [PORT_D
] = DVO_PORT_DPD
,
5028 [PORT_E
] = DVO_PORT_DPE
,
5034 if (!dev_priv
->vbt
.child_dev_num
)
5037 for (i
= 0; i
< dev_priv
->vbt
.child_dev_num
; i
++) {
5038 p_child
= dev_priv
->vbt
.child_dev
+ i
;
5040 if (p_child
->common
.dvo_port
== port_mapping
[port
] &&
5041 (p_child
->common
.device_type
& DEVICE_TYPE_eDP_BITS
) ==
5042 (DEVICE_TYPE_eDP
& DEVICE_TYPE_eDP_BITS
))
5049 intel_dp_add_properties(struct intel_dp
*intel_dp
, struct drm_connector
*connector
)
5051 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
5053 intel_attach_force_audio_property(connector
);
5054 intel_attach_broadcast_rgb_property(connector
);
5055 intel_dp
->color_range_auto
= true;
5057 if (is_edp(intel_dp
)) {
5058 drm_mode_create_scaling_mode_property(connector
->dev
);
5059 drm_object_attach_property(
5061 connector
->dev
->mode_config
.scaling_mode_property
,
5062 DRM_MODE_SCALE_ASPECT
);
5063 intel_connector
->panel
.fitting_mode
= DRM_MODE_SCALE_ASPECT
;
5067 static void intel_dp_init_panel_power_timestamps(struct intel_dp
*intel_dp
)
5069 intel_dp
->last_power_cycle
= jiffies
;
5070 intel_dp
->last_power_on
= jiffies
;
5071 intel_dp
->last_backlight_off
= jiffies
;
5075 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
5076 struct intel_dp
*intel_dp
)
5078 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5079 struct edp_power_seq cur
, vbt
, spec
,
5080 *final
= &intel_dp
->pps_delays
;
5081 u32 pp_on
, pp_off
, pp_div
= 0, pp_ctl
= 0;
5082 int pp_ctrl_reg
, pp_on_reg
, pp_off_reg
, pp_div_reg
= 0;
5084 lockdep_assert_held(&dev_priv
->pps_mutex
);
5086 /* already initialized? */
5087 if (final
->t11_t12
!= 0)
5090 if (IS_BROXTON(dev
)) {
5092 * TODO: BXT has 2 sets of PPS registers.
5093 * Correct Register for Broxton need to be identified
5094 * using VBT. hardcoding for now
5096 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5097 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5098 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5099 } else if (HAS_PCH_SPLIT(dev
)) {
5100 pp_ctrl_reg
= PCH_PP_CONTROL
;
5101 pp_on_reg
= PCH_PP_ON_DELAYS
;
5102 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5103 pp_div_reg
= PCH_PP_DIVISOR
;
5105 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5107 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
5108 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5109 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5110 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5113 /* Workaround: Need to write PP_CONTROL with the unlock key as
5114 * the very first thing. */
5115 pp_ctl
= ironlake_get_pp_control(intel_dp
);
5117 pp_on
= I915_READ(pp_on_reg
);
5118 pp_off
= I915_READ(pp_off_reg
);
5119 if (!IS_BROXTON(dev
)) {
5120 I915_WRITE(pp_ctrl_reg
, pp_ctl
);
5121 pp_div
= I915_READ(pp_div_reg
);
5124 /* Pull timing values out of registers */
5125 cur
.t1_t3
= (pp_on
& PANEL_POWER_UP_DELAY_MASK
) >>
5126 PANEL_POWER_UP_DELAY_SHIFT
;
5128 cur
.t8
= (pp_on
& PANEL_LIGHT_ON_DELAY_MASK
) >>
5129 PANEL_LIGHT_ON_DELAY_SHIFT
;
5131 cur
.t9
= (pp_off
& PANEL_LIGHT_OFF_DELAY_MASK
) >>
5132 PANEL_LIGHT_OFF_DELAY_SHIFT
;
5134 cur
.t10
= (pp_off
& PANEL_POWER_DOWN_DELAY_MASK
) >>
5135 PANEL_POWER_DOWN_DELAY_SHIFT
;
5137 if (IS_BROXTON(dev
)) {
5138 u16 tmp
= (pp_ctl
& BXT_POWER_CYCLE_DELAY_MASK
) >>
5139 BXT_POWER_CYCLE_DELAY_SHIFT
;
5141 cur
.t11_t12
= (tmp
- 1) * 1000;
5145 cur
.t11_t12
= ((pp_div
& PANEL_POWER_CYCLE_DELAY_MASK
) >>
5146 PANEL_POWER_CYCLE_DELAY_SHIFT
) * 1000;
5149 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5150 cur
.t1_t3
, cur
.t8
, cur
.t9
, cur
.t10
, cur
.t11_t12
);
5152 vbt
= dev_priv
->vbt
.edp_pps
;
5154 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5155 * our hw here, which are all in 100usec. */
5156 spec
.t1_t3
= 210 * 10;
5157 spec
.t8
= 50 * 10; /* no limit for t8, use t7 instead */
5158 spec
.t9
= 50 * 10; /* no limit for t9, make it symmetric with t8 */
5159 spec
.t10
= 500 * 10;
5160 /* This one is special and actually in units of 100ms, but zero
5161 * based in the hw (so we need to add 100 ms). But the sw vbt
5162 * table multiplies it with 1000 to make it in units of 100usec,
5164 spec
.t11_t12
= (510 + 100) * 10;
5166 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5167 vbt
.t1_t3
, vbt
.t8
, vbt
.t9
, vbt
.t10
, vbt
.t11_t12
);
5169 /* Use the max of the register settings and vbt. If both are
5170 * unset, fall back to the spec limits. */
5171 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5173 max(cur.field, vbt.field))
5174 assign_final(t1_t3
);
5178 assign_final(t11_t12
);
5181 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5182 intel_dp
->panel_power_up_delay
= get_delay(t1_t3
);
5183 intel_dp
->backlight_on_delay
= get_delay(t8
);
5184 intel_dp
->backlight_off_delay
= get_delay(t9
);
5185 intel_dp
->panel_power_down_delay
= get_delay(t10
);
5186 intel_dp
->panel_power_cycle_delay
= get_delay(t11_t12
);
5189 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5190 intel_dp
->panel_power_up_delay
, intel_dp
->panel_power_down_delay
,
5191 intel_dp
->panel_power_cycle_delay
);
5193 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5194 intel_dp
->backlight_on_delay
, intel_dp
->backlight_off_delay
);
5198 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
5199 struct intel_dp
*intel_dp
)
5201 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5202 u32 pp_on
, pp_off
, pp_div
, port_sel
= 0;
5203 int div
= HAS_PCH_SPLIT(dev
) ? intel_pch_rawclk(dev
) : intel_hrawclk(dev
);
5204 int pp_on_reg
, pp_off_reg
, pp_div_reg
= 0, pp_ctrl_reg
;
5205 enum port port
= dp_to_dig_port(intel_dp
)->port
;
5206 const struct edp_power_seq
*seq
= &intel_dp
->pps_delays
;
5208 lockdep_assert_held(&dev_priv
->pps_mutex
);
5210 if (IS_BROXTON(dev
)) {
5212 * TODO: BXT has 2 sets of PPS registers.
5213 * Correct Register for Broxton need to be identified
5214 * using VBT. hardcoding for now
5216 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5217 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5218 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5220 } else if (HAS_PCH_SPLIT(dev
)) {
5221 pp_on_reg
= PCH_PP_ON_DELAYS
;
5222 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5223 pp_div_reg
= PCH_PP_DIVISOR
;
5225 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5227 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5228 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5229 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5233 * And finally store the new values in the power sequencer. The
5234 * backlight delays are set to 1 because we do manual waits on them. For
5235 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5236 * we'll end up waiting for the backlight off delay twice: once when we
5237 * do the manual sleep, and once when we disable the panel and wait for
5238 * the PP_STATUS bit to become zero.
5240 pp_on
= (seq
->t1_t3
<< PANEL_POWER_UP_DELAY_SHIFT
) |
5241 (1 << PANEL_LIGHT_ON_DELAY_SHIFT
);
5242 pp_off
= (1 << PANEL_LIGHT_OFF_DELAY_SHIFT
) |
5243 (seq
->t10
<< PANEL_POWER_DOWN_DELAY_SHIFT
);
5244 /* Compute the divisor for the pp clock, simply match the Bspec
5246 if (IS_BROXTON(dev
)) {
5247 pp_div
= I915_READ(pp_ctrl_reg
);
5248 pp_div
&= ~BXT_POWER_CYCLE_DELAY_MASK
;
5249 pp_div
|= (DIV_ROUND_UP((seq
->t11_t12
+ 1), 1000)
5250 << BXT_POWER_CYCLE_DELAY_SHIFT
);
5252 pp_div
= ((100 * div
)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT
;
5253 pp_div
|= (DIV_ROUND_UP(seq
->t11_t12
, 1000)
5254 << PANEL_POWER_CYCLE_DELAY_SHIFT
);
5257 /* Haswell doesn't have any port selection bits for the panel
5258 * power sequencer any more. */
5259 if (IS_VALLEYVIEW(dev
)) {
5260 port_sel
= PANEL_PORT_SELECT_VLV(port
);
5261 } else if (HAS_PCH_IBX(dev
) || HAS_PCH_CPT(dev
)) {
5263 port_sel
= PANEL_PORT_SELECT_DPA
;
5265 port_sel
= PANEL_PORT_SELECT_DPD
;
5270 I915_WRITE(pp_on_reg
, pp_on
);
5271 I915_WRITE(pp_off_reg
, pp_off
);
5272 if (IS_BROXTON(dev
))
5273 I915_WRITE(pp_ctrl_reg
, pp_div
);
5275 I915_WRITE(pp_div_reg
, pp_div
);
5277 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5278 I915_READ(pp_on_reg
),
5279 I915_READ(pp_off_reg
),
5281 (I915_READ(pp_ctrl_reg
) & BXT_POWER_CYCLE_DELAY_MASK
) :
5282 I915_READ(pp_div_reg
));
5286 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5288 * @refresh_rate: RR to be programmed
5290 * This function gets called when refresh rate (RR) has to be changed from
5291 * one frequency to another. Switches can be between high and low RR
5292 * supported by the panel or to any other RR based on media playback (in
5293 * this case, RR value needs to be passed from user space).
5295 * The caller of this function needs to take a lock on dev_priv->drrs.
5297 static void intel_dp_set_drrs_state(struct drm_device
*dev
, int refresh_rate
)
5299 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5300 struct intel_encoder
*encoder
;
5301 struct intel_digital_port
*dig_port
= NULL
;
5302 struct intel_dp
*intel_dp
= dev_priv
->drrs
.dp
;
5303 struct intel_crtc_state
*config
= NULL
;
5304 struct intel_crtc
*intel_crtc
= NULL
;
5306 enum drrs_refresh_rate_type index
= DRRS_HIGH_RR
;
5308 if (refresh_rate
<= 0) {
5309 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5313 if (intel_dp
== NULL
) {
5314 DRM_DEBUG_KMS("DRRS not supported.\n");
5319 * FIXME: This needs proper synchronization with psr state for some
5320 * platforms that cannot have PSR and DRRS enabled at the same time.
5323 dig_port
= dp_to_dig_port(intel_dp
);
5324 encoder
= &dig_port
->base
;
5325 intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
5328 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5332 config
= intel_crtc
->config
;
5334 if (dev_priv
->drrs
.type
< SEAMLESS_DRRS_SUPPORT
) {
5335 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5339 if (intel_dp
->attached_connector
->panel
.downclock_mode
->vrefresh
==
5341 index
= DRRS_LOW_RR
;
5343 if (index
== dev_priv
->drrs
.refresh_rate_type
) {
5345 "DRRS requested for previously set RR...ignoring\n");
5349 if (!intel_crtc
->active
) {
5350 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5354 if (INTEL_INFO(dev
)->gen
>= 8 && !IS_CHERRYVIEW(dev
)) {
5357 intel_dp_set_m_n(intel_crtc
, M1_N1
);
5360 intel_dp_set_m_n(intel_crtc
, M2_N2
);
5364 DRM_ERROR("Unsupported refreshrate type\n");
5366 } else if (INTEL_INFO(dev
)->gen
> 6) {
5367 reg
= PIPECONF(intel_crtc
->config
->cpu_transcoder
);
5368 val
= I915_READ(reg
);
5370 if (index
> DRRS_HIGH_RR
) {
5371 if (IS_VALLEYVIEW(dev
))
5372 val
|= PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5374 val
|= PIPECONF_EDP_RR_MODE_SWITCH
;
5376 if (IS_VALLEYVIEW(dev
))
5377 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5379 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH
;
5381 I915_WRITE(reg
, val
);
5384 dev_priv
->drrs
.refresh_rate_type
= index
;
5386 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate
);
5390 * intel_edp_drrs_enable - init drrs struct if supported
5391 * @intel_dp: DP struct
5393 * Initializes frontbuffer_bits and drrs.dp
5395 void intel_edp_drrs_enable(struct intel_dp
*intel_dp
)
5397 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5398 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5399 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5400 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5401 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5403 if (!intel_crtc
->config
->has_drrs
) {
5404 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5408 mutex_lock(&dev_priv
->drrs
.mutex
);
5409 if (WARN_ON(dev_priv
->drrs
.dp
)) {
5410 DRM_ERROR("DRRS already enabled\n");
5414 dev_priv
->drrs
.busy_frontbuffer_bits
= 0;
5416 dev_priv
->drrs
.dp
= intel_dp
;
5419 mutex_unlock(&dev_priv
->drrs
.mutex
);
5423 * intel_edp_drrs_disable - Disable DRRS
5424 * @intel_dp: DP struct
5427 void intel_edp_drrs_disable(struct intel_dp
*intel_dp
)
5429 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5430 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5431 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5432 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5433 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5435 if (!intel_crtc
->config
->has_drrs
)
5438 mutex_lock(&dev_priv
->drrs
.mutex
);
5439 if (!dev_priv
->drrs
.dp
) {
5440 mutex_unlock(&dev_priv
->drrs
.mutex
);
5444 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5445 intel_dp_set_drrs_state(dev_priv
->dev
,
5446 intel_dp
->attached_connector
->panel
.
5447 fixed_mode
->vrefresh
);
5449 dev_priv
->drrs
.dp
= NULL
;
5450 mutex_unlock(&dev_priv
->drrs
.mutex
);
5452 cancel_delayed_work_sync(&dev_priv
->drrs
.work
);
5455 static void intel_edp_drrs_downclock_work(struct work_struct
*work
)
5457 struct drm_i915_private
*dev_priv
=
5458 container_of(work
, typeof(*dev_priv
), drrs
.work
.work
);
5459 struct intel_dp
*intel_dp
;
5461 mutex_lock(&dev_priv
->drrs
.mutex
);
5463 intel_dp
= dev_priv
->drrs
.dp
;
5469 * The delayed work can race with an invalidate hence we need to
5473 if (dev_priv
->drrs
.busy_frontbuffer_bits
)
5476 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
)
5477 intel_dp_set_drrs_state(dev_priv
->dev
,
5478 intel_dp
->attached_connector
->panel
.
5479 downclock_mode
->vrefresh
);
5482 mutex_unlock(&dev_priv
->drrs
.mutex
);
5486 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5488 * @frontbuffer_bits: frontbuffer plane tracking bits
5490 * This function gets called everytime rendering on the given planes start.
5491 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5493 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5495 void intel_edp_drrs_invalidate(struct drm_device
*dev
,
5496 unsigned frontbuffer_bits
)
5498 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5499 struct drm_crtc
*crtc
;
5502 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5505 cancel_delayed_work(&dev_priv
->drrs
.work
);
5507 mutex_lock(&dev_priv
->drrs
.mutex
);
5508 if (!dev_priv
->drrs
.dp
) {
5509 mutex_unlock(&dev_priv
->drrs
.mutex
);
5513 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5514 pipe
= to_intel_crtc(crtc
)->pipe
;
5516 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5517 dev_priv
->drrs
.busy_frontbuffer_bits
|= frontbuffer_bits
;
5519 /* invalidate means busy screen hence upclock */
5520 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5521 intel_dp_set_drrs_state(dev_priv
->dev
,
5522 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5523 fixed_mode
->vrefresh
);
5525 mutex_unlock(&dev_priv
->drrs
.mutex
);
5529 * intel_edp_drrs_flush - Restart Idleness DRRS
5531 * @frontbuffer_bits: frontbuffer plane tracking bits
5533 * This function gets called every time rendering on the given planes has
5534 * completed or flip on a crtc is completed. So DRRS should be upclocked
5535 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5536 * if no other planes are dirty.
5538 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5540 void intel_edp_drrs_flush(struct drm_device
*dev
,
5541 unsigned frontbuffer_bits
)
5543 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5544 struct drm_crtc
*crtc
;
5547 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5550 cancel_delayed_work(&dev_priv
->drrs
.work
);
5552 mutex_lock(&dev_priv
->drrs
.mutex
);
5553 if (!dev_priv
->drrs
.dp
) {
5554 mutex_unlock(&dev_priv
->drrs
.mutex
);
5558 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5559 pipe
= to_intel_crtc(crtc
)->pipe
;
5561 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5562 dev_priv
->drrs
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
5564 /* flush means busy screen hence upclock */
5565 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5566 intel_dp_set_drrs_state(dev_priv
->dev
,
5567 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5568 fixed_mode
->vrefresh
);
5571 * flush also means no more activity hence schedule downclock, if all
5572 * other fbs are quiescent too
5574 if (!dev_priv
->drrs
.busy_frontbuffer_bits
)
5575 schedule_delayed_work(&dev_priv
->drrs
.work
,
5576 msecs_to_jiffies(1000));
5577 mutex_unlock(&dev_priv
->drrs
.mutex
);
5581 * DOC: Display Refresh Rate Switching (DRRS)
5583 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5584 * which enables swtching between low and high refresh rates,
5585 * dynamically, based on the usage scenario. This feature is applicable
5586 * for internal panels.
5588 * Indication that the panel supports DRRS is given by the panel EDID, which
5589 * would list multiple refresh rates for one resolution.
5591 * DRRS is of 2 types - static and seamless.
5592 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5593 * (may appear as a blink on screen) and is used in dock-undock scenario.
5594 * Seamless DRRS involves changing RR without any visual effect to the user
5595 * and can be used during normal system usage. This is done by programming
5596 * certain registers.
5598 * Support for static/seamless DRRS may be indicated in the VBT based on
5599 * inputs from the panel spec.
5601 * DRRS saves power by switching to low RR based on usage scenarios.
5604 * The implementation is based on frontbuffer tracking implementation.
5605 * When there is a disturbance on the screen triggered by user activity or a
5606 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5607 * When there is no movement on screen, after a timeout of 1 second, a switch
5608 * to low RR is made.
5609 * For integration with frontbuffer tracking code,
5610 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5612 * DRRS can be further extended to support other internal panels and also
5613 * the scenario of video playback wherein RR is set based on the rate
5614 * requested by userspace.
5618 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5619 * @intel_connector: eDP connector
5620 * @fixed_mode: preferred mode of panel
5622 * This function is called only once at driver load to initialize basic
5626 * Downclock mode if panel supports it, else return NULL.
5627 * DRRS support is determined by the presence of downclock mode (apart
5628 * from VBT setting).
5630 static struct drm_display_mode
*
5631 intel_dp_drrs_init(struct intel_connector
*intel_connector
,
5632 struct drm_display_mode
*fixed_mode
)
5634 struct drm_connector
*connector
= &intel_connector
->base
;
5635 struct drm_device
*dev
= connector
->dev
;
5636 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5637 struct drm_display_mode
*downclock_mode
= NULL
;
5639 INIT_DELAYED_WORK(&dev_priv
->drrs
.work
, intel_edp_drrs_downclock_work
);
5640 mutex_init(&dev_priv
->drrs
.mutex
);
5642 if (INTEL_INFO(dev
)->gen
<= 6) {
5643 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5647 if (dev_priv
->vbt
.drrs_type
!= SEAMLESS_DRRS_SUPPORT
) {
5648 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5652 downclock_mode
= intel_find_panel_downclock
5653 (dev
, fixed_mode
, connector
);
5655 if (!downclock_mode
) {
5656 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5660 dev_priv
->drrs
.type
= dev_priv
->vbt
.drrs_type
;
5662 dev_priv
->drrs
.refresh_rate_type
= DRRS_HIGH_RR
;
5663 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5664 return downclock_mode
;
5667 static bool intel_edp_init_connector(struct intel_dp
*intel_dp
,
5668 struct intel_connector
*intel_connector
)
5670 struct drm_connector
*connector
= &intel_connector
->base
;
5671 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5672 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5673 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5674 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5675 struct drm_display_mode
*fixed_mode
= NULL
;
5676 struct drm_display_mode
*downclock_mode
= NULL
;
5678 struct drm_display_mode
*scan
;
5680 enum pipe pipe
= INVALID_PIPE
;
5682 if (!is_edp(intel_dp
))
5686 intel_edp_panel_vdd_sanitize(intel_dp
);
5687 pps_unlock(intel_dp
);
5689 /* Cache DPCD and EDID for edp. */
5690 has_dpcd
= intel_dp_get_dpcd(intel_dp
);
5693 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11)
5694 dev_priv
->no_aux_handshake
=
5695 intel_dp
->dpcd
[DP_MAX_DOWNSPREAD
] &
5696 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
;
5698 /* if this fails, presume the device is a ghost */
5699 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5703 /* We now know it's not a ghost, init power sequence regs. */
5705 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
5706 pps_unlock(intel_dp
);
5708 mutex_lock(&dev
->mode_config
.mutex
);
5709 edid
= drm_get_edid(connector
, &intel_dp
->aux
.ddc
);
5711 if (drm_add_edid_modes(connector
, edid
)) {
5712 drm_mode_connector_update_edid_property(connector
,
5714 drm_edid_to_eld(connector
, edid
);
5717 edid
= ERR_PTR(-EINVAL
);
5720 edid
= ERR_PTR(-ENOENT
);
5722 intel_connector
->edid
= edid
;
5724 /* prefer fixed mode from EDID if available */
5725 list_for_each_entry(scan
, &connector
->probed_modes
, head
) {
5726 if ((scan
->type
& DRM_MODE_TYPE_PREFERRED
)) {
5727 fixed_mode
= drm_mode_duplicate(dev
, scan
);
5728 downclock_mode
= intel_dp_drrs_init(
5729 intel_connector
, fixed_mode
);
5734 /* fallback to VBT if available for eDP */
5735 if (!fixed_mode
&& dev_priv
->vbt
.lfp_lvds_vbt_mode
) {
5736 fixed_mode
= drm_mode_duplicate(dev
,
5737 dev_priv
->vbt
.lfp_lvds_vbt_mode
);
5739 fixed_mode
->type
|= DRM_MODE_TYPE_PREFERRED
;
5741 mutex_unlock(&dev
->mode_config
.mutex
);
5743 if (IS_VALLEYVIEW(dev
)) {
5744 intel_dp
->edp_notifier
.notifier_call
= edp_notify_handler
;
5745 register_reboot_notifier(&intel_dp
->edp_notifier
);
5748 * Figure out the current pipe for the initial backlight setup.
5749 * If the current pipe isn't valid, try the PPS pipe, and if that
5750 * fails just assume pipe A.
5752 if (IS_CHERRYVIEW(dev
))
5753 pipe
= DP_PORT_TO_PIPE_CHV(intel_dp
->DP
);
5755 pipe
= PORT_TO_PIPE(intel_dp
->DP
);
5757 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5758 pipe
= intel_dp
->pps_pipe
;
5760 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5763 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5767 intel_panel_init(&intel_connector
->panel
, fixed_mode
, downclock_mode
);
5768 intel_connector
->panel
.backlight_power
= intel_edp_backlight_power
;
5769 intel_panel_setup_backlight(connector
, pipe
);
5775 intel_dp_init_connector(struct intel_digital_port
*intel_dig_port
,
5776 struct intel_connector
*intel_connector
)
5778 struct drm_connector
*connector
= &intel_connector
->base
;
5779 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5780 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5781 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5782 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5783 enum port port
= intel_dig_port
->port
;
5786 intel_dp
->pps_pipe
= INVALID_PIPE
;
5788 /* intel_dp vfuncs */
5789 if (INTEL_INFO(dev
)->gen
>= 9)
5790 intel_dp
->get_aux_clock_divider
= skl_get_aux_clock_divider
;
5791 else if (IS_VALLEYVIEW(dev
))
5792 intel_dp
->get_aux_clock_divider
= vlv_get_aux_clock_divider
;
5793 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
5794 intel_dp
->get_aux_clock_divider
= hsw_get_aux_clock_divider
;
5795 else if (HAS_PCH_SPLIT(dev
))
5796 intel_dp
->get_aux_clock_divider
= ilk_get_aux_clock_divider
;
5798 intel_dp
->get_aux_clock_divider
= i9xx_get_aux_clock_divider
;
5800 if (INTEL_INFO(dev
)->gen
>= 9)
5801 intel_dp
->get_aux_send_ctl
= skl_get_aux_send_ctl
;
5803 intel_dp
->get_aux_send_ctl
= i9xx_get_aux_send_ctl
;
5805 /* Preserve the current hw state. */
5806 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
);
5807 intel_dp
->attached_connector
= intel_connector
;
5809 if (intel_dp_is_edp(dev
, port
))
5810 type
= DRM_MODE_CONNECTOR_eDP
;
5812 type
= DRM_MODE_CONNECTOR_DisplayPort
;
5815 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5816 * for DP the encoder type can be set by the caller to
5817 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5819 if (type
== DRM_MODE_CONNECTOR_eDP
)
5820 intel_encoder
->type
= INTEL_OUTPUT_EDP
;
5822 /* eDP only on port B and/or C on vlv/chv */
5823 if (WARN_ON(IS_VALLEYVIEW(dev
) && is_edp(intel_dp
) &&
5824 port
!= PORT_B
&& port
!= PORT_C
))
5827 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5828 type
== DRM_MODE_CONNECTOR_eDP
? "eDP" : "DP",
5831 drm_connector_init(dev
, connector
, &intel_dp_connector_funcs
, type
);
5832 drm_connector_helper_add(connector
, &intel_dp_connector_helper_funcs
);
5834 connector
->interlace_allowed
= true;
5835 connector
->doublescan_allowed
= 0;
5837 INIT_DELAYED_WORK(&intel_dp
->panel_vdd_work
,
5838 edp_panel_vdd_work
);
5840 intel_connector_attach_encoder(intel_connector
, intel_encoder
);
5841 drm_connector_register(connector
);
5844 intel_connector
->get_hw_state
= intel_ddi_connector_get_hw_state
;
5846 intel_connector
->get_hw_state
= intel_connector_get_hw_state
;
5847 intel_connector
->unregister
= intel_dp_connector_unregister
;
5849 /* Set up the hotplug pin. */
5852 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5855 intel_encoder
->hpd_pin
= HPD_PORT_B
;
5858 intel_encoder
->hpd_pin
= HPD_PORT_C
;
5861 intel_encoder
->hpd_pin
= HPD_PORT_D
;
5864 intel_encoder
->hpd_pin
= HPD_PORT_E
;
5870 if (is_edp(intel_dp
)) {
5872 intel_dp_init_panel_power_timestamps(intel_dp
);
5873 if (IS_VALLEYVIEW(dev
))
5874 vlv_initial_power_sequencer_setup(intel_dp
);
5876 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
5877 pps_unlock(intel_dp
);
5880 intel_dp_aux_init(intel_dp
, intel_connector
);
5882 /* init MST on ports that can support it */
5883 if (HAS_DP_MST(dev
) &&
5884 (port
== PORT_B
|| port
== PORT_C
|| port
== PORT_D
))
5885 intel_dp_mst_encoder_init(intel_dig_port
,
5886 intel_connector
->base
.base
.id
);
5888 if (!intel_edp_init_connector(intel_dp
, intel_connector
)) {
5889 drm_dp_aux_unregister(&intel_dp
->aux
);
5890 if (is_edp(intel_dp
)) {
5891 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5893 * vdd might still be enabled do to the delayed vdd off.
5894 * Make sure vdd is actually turned off here.
5897 edp_panel_vdd_off_sync(intel_dp
);
5898 pps_unlock(intel_dp
);
5900 drm_connector_unregister(connector
);
5901 drm_connector_cleanup(connector
);
5905 intel_dp_add_properties(intel_dp
, connector
);
5907 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5908 * 0xd. Failure to do so will result in spurious interrupts being
5909 * generated on the port when a cable is not attached.
5911 if (IS_G4X(dev
) && !IS_GM45(dev
)) {
5912 u32 temp
= I915_READ(PEG_BAND_GAP_DATA
);
5913 I915_WRITE(PEG_BAND_GAP_DATA
, (temp
& ~0xf) | 0xd);
5916 i915_debugfs_connector_add(connector
);
5922 intel_dp_init(struct drm_device
*dev
, int output_reg
, enum port port
)
5924 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5925 struct intel_digital_port
*intel_dig_port
;
5926 struct intel_encoder
*intel_encoder
;
5927 struct drm_encoder
*encoder
;
5928 struct intel_connector
*intel_connector
;
5930 intel_dig_port
= kzalloc(sizeof(*intel_dig_port
), GFP_KERNEL
);
5931 if (!intel_dig_port
)
5934 intel_connector
= intel_connector_alloc();
5935 if (!intel_connector
) {
5936 kfree(intel_dig_port
);
5940 intel_encoder
= &intel_dig_port
->base
;
5941 encoder
= &intel_encoder
->base
;
5943 drm_encoder_init(dev
, &intel_encoder
->base
, &intel_dp_enc_funcs
,
5944 DRM_MODE_ENCODER_TMDS
);
5946 intel_encoder
->compute_config
= intel_dp_compute_config
;
5947 intel_encoder
->disable
= intel_disable_dp
;
5948 intel_encoder
->get_hw_state
= intel_dp_get_hw_state
;
5949 intel_encoder
->get_config
= intel_dp_get_config
;
5950 intel_encoder
->suspend
= intel_dp_encoder_suspend
;
5951 if (IS_CHERRYVIEW(dev
)) {
5952 intel_encoder
->pre_pll_enable
= chv_dp_pre_pll_enable
;
5953 intel_encoder
->pre_enable
= chv_pre_enable_dp
;
5954 intel_encoder
->enable
= vlv_enable_dp
;
5955 intel_encoder
->post_disable
= chv_post_disable_dp
;
5956 } else if (IS_VALLEYVIEW(dev
)) {
5957 intel_encoder
->pre_pll_enable
= vlv_dp_pre_pll_enable
;
5958 intel_encoder
->pre_enable
= vlv_pre_enable_dp
;
5959 intel_encoder
->enable
= vlv_enable_dp
;
5960 intel_encoder
->post_disable
= vlv_post_disable_dp
;
5962 intel_encoder
->pre_enable
= g4x_pre_enable_dp
;
5963 intel_encoder
->enable
= g4x_enable_dp
;
5964 if (INTEL_INFO(dev
)->gen
>= 5)
5965 intel_encoder
->post_disable
= ilk_post_disable_dp
;
5968 intel_dig_port
->port
= port
;
5969 intel_dig_port
->dp
.output_reg
= output_reg
;
5971 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
5972 if (IS_CHERRYVIEW(dev
)) {
5974 intel_encoder
->crtc_mask
= 1 << 2;
5976 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1);
5978 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1) | (1 << 2);
5980 intel_encoder
->cloneable
= 0;
5982 intel_dig_port
->hpd_pulse
= intel_dp_hpd_pulse
;
5983 dev_priv
->hotplug
.irq_port
[port
] = intel_dig_port
;
5985 if (!intel_dp_init_connector(intel_dig_port
, intel_connector
)) {
5986 drm_encoder_cleanup(encoder
);
5987 kfree(intel_dig_port
);
5988 kfree(intel_connector
);
5992 void intel_dp_mst_suspend(struct drm_device
*dev
)
5994 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5998 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
5999 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6000 if (!intel_dig_port
)
6003 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6004 if (!intel_dig_port
->dp
.can_mst
)
6006 if (intel_dig_port
->dp
.is_mst
)
6007 drm_dp_mst_topology_mgr_suspend(&intel_dig_port
->dp
.mst_mgr
);
6012 void intel_dp_mst_resume(struct drm_device
*dev
)
6014 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6017 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6018 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6019 if (!intel_dig_port
)
6021 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6024 if (!intel_dig_port
->dp
.can_mst
)
6027 ret
= drm_dp_mst_topology_mgr_resume(&intel_dig_port
->dp
.mst_mgr
);
6029 intel_dp_check_mst_status(&intel_dig_port
->dp
);