2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll
[] = {
57 { .p1
= 2, .p2
= 10, .n
= 2, .m1
= 23, .m2
= 8 } },
59 { .p1
= 1, .p2
= 10, .n
= 1, .m1
= 14, .m2
= 2 } }
62 static const struct dp_link_dpll pch_dpll
[] = {
64 { .p1
= 2, .p2
= 10, .n
= 1, .m1
= 12, .m2
= 9 } },
66 { .p1
= 1, .p2
= 10, .n
= 2, .m1
= 14, .m2
= 8 } }
69 static const struct dp_link_dpll vlv_dpll
[] = {
71 { .p1
= 3, .p2
= 2, .n
= 5, .m1
= 3, .m2
= 81 } },
73 { .p1
= 2, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll
[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1
= 4, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1
= 4, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1
= 2, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } }
/* Source link rates (kHz) per platform; 540000 (HBR2) must stay last. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp
*intel_dp
)
109 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
111 return intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
;
114 static struct drm_device
*intel_dp_to_dev(struct intel_dp
*intel_dp
)
116 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
118 return intel_dig_port
->base
.base
.dev
;
121 static struct intel_dp
*intel_attached_dp(struct drm_connector
*connector
)
123 return enc_to_intel_dp(&intel_attached_encoder(connector
)->base
);
126 static void intel_dp_link_down(struct intel_dp
*intel_dp
);
127 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
);
128 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
);
129 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
);
130 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
/*
 * Return a 4-bit mask of the lanes (bits 0-3) NOT used by a link of
 * @lane_count lanes, e.g. lane_count == 2 -> 0xc.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
139 intel_dp_max_link_bw(struct intel_dp
*intel_dp
)
141 int max_link_bw
= intel_dp
->dpcd
[DP_MAX_LINK_RATE
];
143 switch (max_link_bw
) {
144 case DP_LINK_BW_1_62
:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
151 max_link_bw
= DP_LINK_BW_1_62
;
157 static u8
intel_dp_max_lane_count(struct intel_dp
*intel_dp
)
159 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
160 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
161 u8 source_max
, sink_max
;
164 if (HAS_DDI(dev
) && intel_dig_port
->port
== PORT_A
&&
165 (intel_dig_port
->saved_port_bits
& DDI_A_4_LANES
) == 0)
168 sink_max
= drm_dp_max_lane_count(intel_dp
->dpcd
);
170 return min(source_max
, sink_max
);
174 * The units on the numbers in the next two are... bizarre. Examples will
175 * make it clearer; this one parallels an example in the eDP spec.
177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
179 * 270000 * 1 * 8 / 10 == 216000
181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
182 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184 * 119000. At 18bpp that's 2142000 kilobits per second.
186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
187 * get the result in decakilobits instead of kilobits.
/*
 * Required link bandwidth for @pixel_clock (kHz) at @bpp bits per pixel,
 * in decakilobits/s; rounds up via the +9 before the divide by 10.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * Data capacity of a link: @max_link_clock (kHz) x @max_lanes, scaled by
 * the 8b/10b encoding overhead; result is in decakilobits/s.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector
*connector
,
204 struct drm_display_mode
*mode
)
206 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
207 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
208 struct drm_display_mode
*fixed_mode
= intel_connector
->panel
.fixed_mode
;
209 int target_clock
= mode
->clock
;
210 int max_rate
, mode_rate
, max_lanes
, max_link_clock
;
212 if (is_edp(intel_dp
) && fixed_mode
) {
213 if (mode
->hdisplay
> fixed_mode
->hdisplay
)
216 if (mode
->vdisplay
> fixed_mode
->vdisplay
)
219 target_clock
= fixed_mode
->clock
;
222 max_link_clock
= intel_dp_max_link_rate(intel_dp
);
223 max_lanes
= intel_dp_max_lane_count(intel_dp
);
225 max_rate
= intel_dp_max_data_rate(max_link_clock
, max_lanes
);
226 mode_rate
= intel_dp_link_required(target_clock
, 18);
228 if (mode_rate
> max_rate
)
229 return MODE_CLOCK_HIGH
;
231 if (mode
->clock
< 10000)
232 return MODE_CLOCK_LOW
;
234 if (mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
235 return MODE_H_ILLEGAL
;
/*
 * Pack up to 4 bytes from @src into a big-endian 32-bit AUX data word
 * (byte 0 in the most significant position).
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * Unpack a big-endian 32-bit AUX data word into up to 4 bytes of @dst
 * (most significant byte first).
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
268 static void pps_lock(struct intel_dp
*intel_dp
)
270 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
271 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
272 struct drm_device
*dev
= encoder
->base
.dev
;
273 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
274 enum intel_display_power_domain power_domain
;
277 * See vlv_power_sequencer_reset() why we need
278 * a power domain reference here.
280 power_domain
= intel_display_port_power_domain(encoder
);
281 intel_display_power_get(dev_priv
, power_domain
);
283 mutex_lock(&dev_priv
->pps_mutex
);
286 static void pps_unlock(struct intel_dp
*intel_dp
)
288 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
289 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
290 struct drm_device
*dev
= encoder
->base
.dev
;
291 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
292 enum intel_display_power_domain power_domain
;
294 mutex_unlock(&dev_priv
->pps_mutex
);
296 power_domain
= intel_display_port_power_domain(encoder
);
297 intel_display_power_put(dev_priv
, power_domain
);
301 vlv_power_sequencer_kick(struct intel_dp
*intel_dp
)
303 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
304 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
305 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
306 enum pipe pipe
= intel_dp
->pps_pipe
;
307 bool pll_enabled
, release_cl_override
= false;
308 enum dpio_phy phy
= DPIO_PHY(pipe
);
309 enum dpio_channel ch
= vlv_pipe_to_channel(pipe
);
312 if (WARN(I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314 pipe_name(pipe
), port_name(intel_dig_port
->port
)))
317 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318 pipe_name(pipe
), port_name(intel_dig_port
->port
));
320 /* Preserve the BIOS-computed detected bit. This is
321 * supposed to be read-only.
323 DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
324 DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
325 DP
|= DP_PORT_WIDTH(1);
326 DP
|= DP_LINK_TRAIN_PAT_1
;
328 if (IS_CHERRYVIEW(dev
))
329 DP
|= DP_PIPE_SELECT_CHV(pipe
);
330 else if (pipe
== PIPE_B
)
331 DP
|= DP_PIPEB_SELECT
;
333 pll_enabled
= I915_READ(DPLL(pipe
)) & DPLL_VCO_ENABLE
;
336 * The DPLL for the pipe must be enabled for this to work.
337 * So enable temporarily it if it's not already enabled.
340 release_cl_override
= IS_CHERRYVIEW(dev
) &&
341 !chv_phy_powergate_ch(dev_priv
, phy
, ch
, true);
343 vlv_force_pll_on(dev
, pipe
, IS_CHERRYVIEW(dev
) ?
344 &chv_dpll
[0].dpll
: &vlv_dpll
[0].dpll
);
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power seqeuencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
353 I915_WRITE(intel_dp
->output_reg
, DP
);
354 POSTING_READ(intel_dp
->output_reg
);
356 I915_WRITE(intel_dp
->output_reg
, DP
| DP_PORT_EN
);
357 POSTING_READ(intel_dp
->output_reg
);
359 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
360 POSTING_READ(intel_dp
->output_reg
);
363 vlv_force_pll_off(dev
, pipe
);
365 if (release_cl_override
)
366 chv_phy_powergate_ch(dev_priv
, phy
, ch
, false);
371 vlv_power_sequencer_pipe(struct intel_dp
*intel_dp
)
373 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
374 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
375 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
376 struct intel_encoder
*encoder
;
377 unsigned int pipes
= (1 << PIPE_A
) | (1 << PIPE_B
);
380 lockdep_assert_held(&dev_priv
->pps_mutex
);
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp
));
385 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
386 return intel_dp
->pps_pipe
;
389 * We don't have power sequencer currently.
390 * Pick one that's not used by other ports.
392 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
394 struct intel_dp
*tmp
;
396 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
399 tmp
= enc_to_intel_dp(&encoder
->base
);
401 if (tmp
->pps_pipe
!= INVALID_PIPE
)
402 pipes
&= ~(1 << tmp
->pps_pipe
);
406 * Didn't find one. This should not happen since there
407 * are two power sequencers and up to two eDP ports.
409 if (WARN_ON(pipes
== 0))
412 pipe
= ffs(pipes
) - 1;
414 vlv_steal_power_sequencer(dev
, pipe
);
415 intel_dp
->pps_pipe
= pipe
;
417 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
418 pipe_name(intel_dp
->pps_pipe
),
419 port_name(intel_dig_port
->port
));
421 /* init power sequencer on this pipe and port */
422 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
423 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
426 * Even vdd force doesn't work until we've made
427 * the power sequencer lock in on the port.
429 vlv_power_sequencer_kick(intel_dp
);
431 return intel_dp
->pps_pipe
;
434 typedef bool (*vlv_pipe_check
)(struct drm_i915_private
*dev_priv
,
437 static bool vlv_pipe_has_pp_on(struct drm_i915_private
*dev_priv
,
440 return I915_READ(VLV_PIPE_PP_STATUS(pipe
)) & PP_ON
;
443 static bool vlv_pipe_has_vdd_on(struct drm_i915_private
*dev_priv
,
446 return I915_READ(VLV_PIPE_PP_CONTROL(pipe
)) & EDP_FORCE_VDD
;
449 static bool vlv_pipe_any(struct drm_i915_private
*dev_priv
,
456 vlv_initial_pps_pipe(struct drm_i915_private
*dev_priv
,
458 vlv_pipe_check pipe_check
)
462 for (pipe
= PIPE_A
; pipe
<= PIPE_B
; pipe
++) {
463 u32 port_sel
= I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe
)) &
464 PANEL_PORT_SELECT_MASK
;
466 if (port_sel
!= PANEL_PORT_SELECT_VLV(port
))
469 if (!pipe_check(dev_priv
, pipe
))
479 vlv_initial_power_sequencer_setup(struct intel_dp
*intel_dp
)
481 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
482 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
483 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
484 enum port port
= intel_dig_port
->port
;
486 lockdep_assert_held(&dev_priv
->pps_mutex
);
488 /* try to find a pipe with this port selected */
489 /* first pick one where the panel is on */
490 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
492 /* didn't find one? pick one where vdd is on */
493 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
494 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
495 vlv_pipe_has_vdd_on
);
496 /* didn't find one? pick one with just the correct port */
497 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
498 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
501 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502 if (intel_dp
->pps_pipe
== INVALID_PIPE
) {
503 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
508 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509 port_name(port
), pipe_name(intel_dp
->pps_pipe
));
511 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
512 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
515 void vlv_power_sequencer_reset(struct drm_i915_private
*dev_priv
)
517 struct drm_device
*dev
= dev_priv
->dev
;
518 struct intel_encoder
*encoder
;
520 if (WARN_ON(!IS_VALLEYVIEW(dev
)))
524 * We can't grab pps_mutex here due to deadlock with power_domain
525 * mutex when power_domain functions are called while holding pps_mutex.
526 * That also means that in order to use pps_pipe the code needs to
527 * hold both a power domain reference and pps_mutex, and the power domain
528 * reference get/put must be done while _not_ holding pps_mutex.
529 * pps_{lock,unlock}() do these steps in the correct order, so one
530 * should use them always.
533 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, base
.head
) {
534 struct intel_dp
*intel_dp
;
536 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
539 intel_dp
= enc_to_intel_dp(&encoder
->base
);
540 intel_dp
->pps_pipe
= INVALID_PIPE
;
544 static u32
_pp_ctrl_reg(struct intel_dp
*intel_dp
)
546 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev
))
551 return PCH_PP_CONTROL
;
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp
));
556 static u32
_pp_stat_reg(struct intel_dp
*intel_dp
)
558 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
561 return BXT_PP_STATUS(0);
562 else if (HAS_PCH_SPLIT(dev
))
563 return PCH_PP_STATUS
;
565 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp
));
568 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
569 This function only applicable when panel PM state is not to be tracked */
570 static int edp_notify_handler(struct notifier_block
*this, unsigned long code
,
573 struct intel_dp
*intel_dp
= container_of(this, typeof(* intel_dp
),
575 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
576 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
578 if (!is_edp(intel_dp
) || code
!= SYS_RESTART
)
583 if (IS_VALLEYVIEW(dev
)) {
584 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
585 u32 pp_ctrl_reg
, pp_div_reg
;
588 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
589 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
590 pp_div
= I915_READ(pp_div_reg
);
591 pp_div
&= PP_REFERENCE_DIVIDER_MASK
;
593 /* 0x1F write to PP_DIV_REG sets max cycle delay */
594 I915_WRITE(pp_div_reg
, pp_div
| 0x1F);
595 I915_WRITE(pp_ctrl_reg
, PANEL_UNLOCK_REGS
| PANEL_POWER_OFF
);
596 msleep(intel_dp
->panel_power_cycle_delay
);
599 pps_unlock(intel_dp
);
604 static bool edp_have_panel_power(struct intel_dp
*intel_dp
)
606 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
607 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
609 lockdep_assert_held(&dev_priv
->pps_mutex
);
611 if (IS_VALLEYVIEW(dev
) &&
612 intel_dp
->pps_pipe
== INVALID_PIPE
)
615 return (I915_READ(_pp_stat_reg(intel_dp
)) & PP_ON
) != 0;
618 static bool edp_have_panel_vdd(struct intel_dp
*intel_dp
)
620 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
621 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
623 lockdep_assert_held(&dev_priv
->pps_mutex
);
625 if (IS_VALLEYVIEW(dev
) &&
626 intel_dp
->pps_pipe
== INVALID_PIPE
)
629 return I915_READ(_pp_ctrl_reg(intel_dp
)) & EDP_FORCE_VDD
;
633 intel_dp_check_edp(struct intel_dp
*intel_dp
)
635 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
636 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
638 if (!is_edp(intel_dp
))
641 if (!edp_have_panel_power(intel_dp
) && !edp_have_panel_vdd(intel_dp
)) {
642 WARN(1, "eDP powered off while attempting aux channel communication.\n");
643 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
644 I915_READ(_pp_stat_reg(intel_dp
)),
645 I915_READ(_pp_ctrl_reg(intel_dp
)));
650 intel_dp_aux_wait_done(struct intel_dp
*intel_dp
, bool has_aux_irq
)
652 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
653 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
654 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
655 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
659 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
661 done
= wait_event_timeout(dev_priv
->gmbus_wait_queue
, C
,
662 msecs_to_jiffies_timeout(10));
664 done
= wait_for_atomic(C
, 10) == 0;
666 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
673 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
675 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
676 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
679 * The clock divider is based off the hrawclk, and would like to run at
680 * 2MHz. So, take the hrawclk value and divide by 2 and use that
682 return index
? 0 : intel_hrawclk(dev
) / 2;
685 static uint32_t ilk_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
687 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
688 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
689 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
694 if (intel_dig_port
->port
== PORT_A
) {
695 return DIV_ROUND_UP(dev_priv
->cdclk_freq
, 2000);
698 return DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
702 static uint32_t hsw_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
704 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
705 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
706 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
708 if (intel_dig_port
->port
== PORT_A
) {
711 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
712 } else if (dev_priv
->pch_id
== INTEL_PCH_LPT_DEVICE_ID_TYPE
) {
713 /* Workaround for non-ULT HSW */
720 return index
? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
/* AUX clock divider for VLV/CHV: fixed value of 100, single divider only. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
739 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp
*intel_dp
,
742 uint32_t aux_clock_divider
)
744 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
745 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
746 uint32_t precharge
, timeout
;
753 if (IS_BROADWELL(dev
) && intel_dig_port
->port
== PORT_A
)
754 timeout
= DP_AUX_CH_CTL_TIME_OUT_600us
;
756 timeout
= DP_AUX_CH_CTL_TIME_OUT_400us
;
758 return DP_AUX_CH_CTL_SEND_BUSY
|
760 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
761 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
763 DP_AUX_CH_CTL_RECEIVE_ERROR
|
764 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
765 (precharge
<< DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT
) |
766 (aux_clock_divider
<< DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT
);
769 static uint32_t skl_get_aux_send_ctl(struct intel_dp
*intel_dp
,
774 return DP_AUX_CH_CTL_SEND_BUSY
|
776 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
777 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
778 DP_AUX_CH_CTL_TIME_OUT_1600us
|
779 DP_AUX_CH_CTL_RECEIVE_ERROR
|
780 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
781 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
785 intel_dp_aux_ch(struct intel_dp
*intel_dp
,
786 const uint8_t *send
, int send_bytes
,
787 uint8_t *recv
, int recv_size
)
789 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
790 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
791 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
792 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
793 uint32_t aux_clock_divider
;
794 int i
, ret
, recv_bytes
;
797 bool has_aux_irq
= HAS_AUX_IRQ(dev
);
803 * We will be called with VDD already enabled for dpcd/edid/oui reads.
804 * In such cases we want to leave VDD enabled and it's up to upper layers
805 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
808 vdd
= edp_panel_vdd_on(intel_dp
);
810 /* dp aux is extremely sensitive to irq latency, hence request the
811 * lowest possible wakeup latency and so prevent the cpu from going into
814 pm_qos_update_request(&dev_priv
->pm_qos
, 0);
816 intel_dp_check_edp(intel_dp
);
818 intel_aux_display_runtime_get(dev_priv
);
820 /* Try to wait for any previous AUX channel activity */
821 for (try = 0; try < 3; try++) {
822 status
= I915_READ_NOTRACE(ch_ctl
);
823 if ((status
& DP_AUX_CH_CTL_SEND_BUSY
) == 0)
829 static u32 last_status
= -1;
830 const u32 status
= I915_READ(ch_ctl
);
832 if (status
!= last_status
) {
833 WARN(1, "dp_aux_ch not started status 0x%08x\n",
835 last_status
= status
;
842 /* Only 5 data registers! */
843 if (WARN_ON(send_bytes
> 20 || recv_size
> 20)) {
848 while ((aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, clock
++))) {
849 u32 send_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
,
854 /* Must try at least 3 times according to DP spec */
855 for (try = 0; try < 5; try++) {
856 /* Load the send data into the aux channel data registers */
857 for (i
= 0; i
< send_bytes
; i
+= 4)
858 I915_WRITE(intel_dp
->aux_ch_data_reg
[i
>> 2],
859 intel_dp_pack_aux(send
+ i
,
862 /* Send the command and wait for it to complete */
863 I915_WRITE(ch_ctl
, send_ctl
);
865 status
= intel_dp_aux_wait_done(intel_dp
, has_aux_irq
);
867 /* Clear done status and any errors */
871 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
872 DP_AUX_CH_CTL_RECEIVE_ERROR
);
874 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
)
877 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
878 * 400us delay required for errors and timeouts
879 * Timeout errors from the HW already meet this
880 * requirement so skip to next iteration
882 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
883 usleep_range(400, 500);
886 if (status
& DP_AUX_CH_CTL_DONE
)
891 if ((status
& DP_AUX_CH_CTL_DONE
) == 0) {
892 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status
);
898 /* Check for timeout or receive error.
899 * Timeouts occur when the sink is not connected
901 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
902 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status
);
907 /* Timeouts occur when the device isn't connected, so they're
908 * "normal" -- don't fill the kernel log with these */
909 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
) {
910 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status
);
915 /* Unload any bytes sent back from the other side */
916 recv_bytes
= ((status
& DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
) >>
917 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
);
918 if (recv_bytes
> recv_size
)
919 recv_bytes
= recv_size
;
921 for (i
= 0; i
< recv_bytes
; i
+= 4)
922 intel_dp_unpack_aux(I915_READ(intel_dp
->aux_ch_data_reg
[i
>> 2]),
923 recv
+ i
, recv_bytes
- i
);
927 pm_qos_update_request(&dev_priv
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
928 intel_aux_display_runtime_put(dev_priv
);
931 edp_panel_vdd_off(intel_dp
, false);
933 pps_unlock(intel_dp
);
938 #define BARE_ADDRESS_SIZE 3
939 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
941 intel_dp_aux_transfer(struct drm_dp_aux
*aux
, struct drm_dp_aux_msg
*msg
)
943 struct intel_dp
*intel_dp
= container_of(aux
, struct intel_dp
, aux
);
944 uint8_t txbuf
[20], rxbuf
[20];
945 size_t txsize
, rxsize
;
948 txbuf
[0] = (msg
->request
<< 4) |
949 ((msg
->address
>> 16) & 0xf);
950 txbuf
[1] = (msg
->address
>> 8) & 0xff;
951 txbuf
[2] = msg
->address
& 0xff;
952 txbuf
[3] = msg
->size
- 1;
954 switch (msg
->request
& ~DP_AUX_I2C_MOT
) {
955 case DP_AUX_NATIVE_WRITE
:
956 case DP_AUX_I2C_WRITE
:
957 case DP_AUX_I2C_WRITE_STATUS_UPDATE
:
958 txsize
= msg
->size
? HEADER_SIZE
+ msg
->size
: BARE_ADDRESS_SIZE
;
959 rxsize
= 2; /* 0 or 1 data bytes */
961 if (WARN_ON(txsize
> 20))
964 memcpy(txbuf
+ HEADER_SIZE
, msg
->buffer
, msg
->size
);
966 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
968 msg
->reply
= rxbuf
[0] >> 4;
971 /* Number of bytes written in a short write. */
972 ret
= clamp_t(int, rxbuf
[1], 0, msg
->size
);
974 /* Return payload size. */
980 case DP_AUX_NATIVE_READ
:
981 case DP_AUX_I2C_READ
:
982 txsize
= msg
->size
? HEADER_SIZE
: BARE_ADDRESS_SIZE
;
983 rxsize
= msg
->size
+ 1;
985 if (WARN_ON(rxsize
> 20))
988 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
990 msg
->reply
= rxbuf
[0] >> 4;
992 * Assume happy day, and copy the data. The caller is
993 * expected to check msg->reply before touching it.
995 * Return payload size.
998 memcpy(msg
->buffer
, rxbuf
+ 1, ret
);
1010 static uint32_t g4x_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1017 return DP_AUX_CH_CTL(port
);
1020 return DP_AUX_CH_CTL(PORT_B
);
1024 static uint32_t g4x_aux_data_reg(struct drm_i915_private
*dev_priv
,
1025 enum port port
, int index
)
1031 return DP_AUX_CH_DATA(port
, index
);
1034 return DP_AUX_CH_DATA(PORT_B
, index
);
1038 static uint32_t ilk_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1043 return DP_AUX_CH_CTL(port
);
1047 return PCH_DP_AUX_CH_CTL(port
);
1050 return DP_AUX_CH_CTL(PORT_A
);
1054 static uint32_t ilk_aux_data_reg(struct drm_i915_private
*dev_priv
,
1055 enum port port
, int index
)
1059 return DP_AUX_CH_DATA(port
, index
);
1063 return PCH_DP_AUX_CH_DATA(port
, index
);
1066 return DP_AUX_CH_DATA(PORT_A
, index
);
1071 * On SKL we don't have Aux for port E so we rely
1072 * on VBT to set a proper alternate aux channel.
1074 static enum port
skl_porte_aux_port(struct drm_i915_private
*dev_priv
)
1076 const struct ddi_vbt_port_info
*info
=
1077 &dev_priv
->vbt
.ddi_port_info
[PORT_E
];
1079 switch (info
->alternate_aux_channel
) {
1089 MISSING_CASE(info
->alternate_aux_channel
);
1094 static uint32_t skl_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1098 port
= skl_porte_aux_port(dev_priv
);
1105 return DP_AUX_CH_CTL(port
);
1108 return DP_AUX_CH_CTL(PORT_A
);
1112 static uint32_t skl_aux_data_reg(struct drm_i915_private
*dev_priv
,
1113 enum port port
, int index
)
1116 port
= skl_porte_aux_port(dev_priv
);
1123 return DP_AUX_CH_DATA(port
, index
);
1126 return DP_AUX_CH_DATA(PORT_A
, index
);
1130 static uint32_t intel_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1133 if (INTEL_INFO(dev_priv
)->gen
>= 9)
1134 return skl_aux_ctl_reg(dev_priv
, port
);
1135 else if (HAS_PCH_SPLIT(dev_priv
))
1136 return ilk_aux_ctl_reg(dev_priv
, port
);
1138 return g4x_aux_ctl_reg(dev_priv
, port
);
1141 static uint32_t intel_aux_data_reg(struct drm_i915_private
*dev_priv
,
1142 enum port port
, int index
)
1144 if (INTEL_INFO(dev_priv
)->gen
>= 9)
1145 return skl_aux_data_reg(dev_priv
, port
, index
);
1146 else if (HAS_PCH_SPLIT(dev_priv
))
1147 return ilk_aux_data_reg(dev_priv
, port
, index
);
1149 return g4x_aux_data_reg(dev_priv
, port
, index
);
1152 static void intel_aux_reg_init(struct intel_dp
*intel_dp
)
1154 struct drm_i915_private
*dev_priv
= to_i915(intel_dp_to_dev(intel_dp
));
1155 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1158 intel_dp
->aux_ch_ctl_reg
= intel_aux_ctl_reg(dev_priv
, port
);
1159 for (i
= 0; i
< ARRAY_SIZE(intel_dp
->aux_ch_data_reg
); i
++)
1160 intel_dp
->aux_ch_data_reg
[i
] = intel_aux_data_reg(dev_priv
, port
, i
);
1164 intel_dp_aux_fini(struct intel_dp
*intel_dp
)
1166 drm_dp_aux_unregister(&intel_dp
->aux
);
1167 kfree(intel_dp
->aux
.name
);
1171 intel_dp_aux_init(struct intel_dp
*intel_dp
, struct intel_connector
*connector
)
1173 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1174 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1175 enum port port
= intel_dig_port
->port
;
1178 intel_aux_reg_init(intel_dp
);
1180 intel_dp
->aux
.name
= kasprintf(GFP_KERNEL
, "DPDDC-%c", port_name(port
));
1181 if (!intel_dp
->aux
.name
)
1184 intel_dp
->aux
.dev
= dev
->dev
;
1185 intel_dp
->aux
.transfer
= intel_dp_aux_transfer
;
1187 DRM_DEBUG_KMS("registering %s bus for %s\n",
1189 connector
->base
.kdev
->kobj
.name
);
1191 ret
= drm_dp_aux_register(&intel_dp
->aux
);
1193 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1194 intel_dp
->aux
.name
, ret
);
1195 kfree(intel_dp
->aux
.name
);
1199 ret
= sysfs_create_link(&connector
->base
.kdev
->kobj
,
1200 &intel_dp
->aux
.ddc
.dev
.kobj
,
1201 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1203 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1204 intel_dp
->aux
.name
, ret
);
1205 intel_dp_aux_fini(intel_dp
);
1213 intel_dp_connector_unregister(struct intel_connector
*intel_connector
)
1215 struct intel_dp
*intel_dp
= intel_attached_dp(&intel_connector
->base
);
1217 if (!intel_connector
->mst_port
)
1218 sysfs_remove_link(&intel_connector
->base
.kdev
->kobj
,
1219 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1220 intel_connector_unregister(intel_connector
);
1224 skl_edp_set_pll_config(struct intel_crtc_state
*pipe_config
)
1228 memset(&pipe_config
->dpll_hw_state
, 0,
1229 sizeof(pipe_config
->dpll_hw_state
));
1231 pipe_config
->ddi_pll_sel
= SKL_DPLL0
;
1232 pipe_config
->dpll_hw_state
.cfgcr1
= 0;
1233 pipe_config
->dpll_hw_state
.cfgcr2
= 0;
1235 ctrl1
= DPLL_CTRL1_OVERRIDE(SKL_DPLL0
);
1236 switch (pipe_config
->port_clock
/ 2) {
1238 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810
,
1242 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350
,
1246 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700
,
1250 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620
,
1253 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1254 results in CDCLK change. Need to handle the change of CDCLK by
1255 disabling pipes and re-enabling them */
1257 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080
,
1261 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160
,
1266 pipe_config
->dpll_hw_state
.ctrl1
= ctrl1
;
/*
 * hsw_dp_set_ddi_pll_sel() - choose a fixed LCPLL link clock for DP.
 * Zeroes dpll_hw_state and selects one of the LCPLL frequency codes
 * (810/1350/2700) based on half the port clock.
 * NOTE(review): the extraction dropped the switch case labels, break
 * statements and any default branch here — confirm the clock-to-select
 * mapping against the original file before relying on it.
 */
1270 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state
*pipe_config
)
1272 memset(&pipe_config
->dpll_hw_state
, 0,
1273 sizeof(pipe_config
->dpll_hw_state
));
1275 switch (pipe_config
->port_clock
/ 2) {
1277 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_810
;
1280 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_1350
;
1283 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_2700
;
1289 intel_dp_sink_rates(struct intel_dp
*intel_dp
, const int **sink_rates
)
1291 if (intel_dp
->num_sink_rates
) {
1292 *sink_rates
= intel_dp
->sink_rates
;
1293 return intel_dp
->num_sink_rates
;
1296 *sink_rates
= default_rates
;
1298 return (intel_dp_max_link_bw(intel_dp
) >> 3) + 1;
/*
 * intel_dp_source_supports_hbr2() - can this source side drive HBR2 (5.4 GHz)?
 * Early SKL steppings are excluded by workaround WaDisableHBR2; otherwise
 * HSW (non-ULX), BDW and gen9+ qualify.
 * NOTE(review): the return statements for both branches are missing from
 * this extraction — the exact true/false values cannot be confirmed here.
 */
1301 bool intel_dp_source_supports_hbr2(struct intel_dp
*intel_dp
)
1303 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1304 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
1306 /* WaDisableHBR2:skl */
1307 if (IS_SKL_REVID(dev
, 0, SKL_REVID_B0
))
1310 if ((IS_HASWELL(dev
) && !IS_HSW_ULX(dev
)) || IS_BROADWELL(dev
) ||
1311 (INTEL_INFO(dev
)->gen
>= 9))
1318 intel_dp_source_rates(struct intel_dp
*intel_dp
, const int **source_rates
)
1320 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1321 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
1324 if (IS_BROXTON(dev
)) {
1325 *source_rates
= bxt_rates
;
1326 size
= ARRAY_SIZE(bxt_rates
);
1327 } else if (IS_SKYLAKE(dev
) || IS_KABYLAKE(dev
)) {
1328 *source_rates
= skl_rates
;
1329 size
= ARRAY_SIZE(skl_rates
);
1331 *source_rates
= default_rates
;
1332 size
= ARRAY_SIZE(default_rates
);
1335 /* This depends on the fact that 5.4 is last value in the array */
1336 if (!intel_dp_source_supports_hbr2(intel_dp
))
1343 intel_dp_set_clock(struct intel_encoder
*encoder
,
1344 struct intel_crtc_state
*pipe_config
)
1346 struct drm_device
*dev
= encoder
->base
.dev
;
1347 const struct dp_link_dpll
*divisor
= NULL
;
1351 divisor
= gen4_dpll
;
1352 count
= ARRAY_SIZE(gen4_dpll
);
1353 } else if (HAS_PCH_SPLIT(dev
)) {
1355 count
= ARRAY_SIZE(pch_dpll
);
1356 } else if (IS_CHERRYVIEW(dev
)) {
1358 count
= ARRAY_SIZE(chv_dpll
);
1359 } else if (IS_VALLEYVIEW(dev
)) {
1361 count
= ARRAY_SIZE(vlv_dpll
);
1364 if (divisor
&& count
) {
1365 for (i
= 0; i
< count
; i
++) {
1366 if (pipe_config
->port_clock
== divisor
[i
].clock
) {
1367 pipe_config
->dpll
= divisor
[i
].dpll
;
1368 pipe_config
->clock_set
= true;
1375 static int intersect_rates(const int *source_rates
, int source_len
,
1376 const int *sink_rates
, int sink_len
,
1379 int i
= 0, j
= 0, k
= 0;
1381 while (i
< source_len
&& j
< sink_len
) {
1382 if (source_rates
[i
] == sink_rates
[j
]) {
1383 if (WARN_ON(k
>= DP_MAX_SUPPORTED_RATES
))
1385 common_rates
[k
] = source_rates
[i
];
1389 } else if (source_rates
[i
] < sink_rates
[j
]) {
1398 static int intel_dp_common_rates(struct intel_dp
*intel_dp
,
1401 const int *source_rates
, *sink_rates
;
1402 int source_len
, sink_len
;
1404 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1405 source_len
= intel_dp_source_rates(intel_dp
, &source_rates
);
1407 return intersect_rates(source_rates
, source_len
,
1408 sink_rates
, sink_len
,
1412 static void snprintf_int_array(char *str
, size_t len
,
1413 const int *array
, int nelem
)
1419 for (i
= 0; i
< nelem
; i
++) {
1420 int r
= snprintf(str
, len
, "%s%d", i
? ", " : "", array
[i
]);
1428 static void intel_dp_print_rates(struct intel_dp
*intel_dp
)
1430 const int *source_rates
, *sink_rates
;
1431 int source_len
, sink_len
, common_len
;
1432 int common_rates
[DP_MAX_SUPPORTED_RATES
];
1433 char str
[128]; /* FIXME: too big for stack? */
1435 if ((drm_debug
& DRM_UT_KMS
) == 0)
1438 source_len
= intel_dp_source_rates(intel_dp
, &source_rates
);
1439 snprintf_int_array(str
, sizeof(str
), source_rates
, source_len
);
1440 DRM_DEBUG_KMS("source rates: %s\n", str
);
1442 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1443 snprintf_int_array(str
, sizeof(str
), sink_rates
, sink_len
);
1444 DRM_DEBUG_KMS("sink rates: %s\n", str
);
1446 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1447 snprintf_int_array(str
, sizeof(str
), common_rates
, common_len
);
1448 DRM_DEBUG_KMS("common rates: %s\n", str
);
/*
 * rate_to_index() - linear scan for @find in a rate table.
 * Iterates over at most DP_MAX_SUPPORTED_RATES entries looking for a match.
 * NOTE(review): the loop body's exit statement and the function's return
 * are missing from this extraction; presumably it returns the index of the
 * first match (or the scan bound when none matches) — confirm upstream.
 */
1451 static int rate_to_index(int find
, const int *rates
)
1455 for (i
= 0; i
< DP_MAX_SUPPORTED_RATES
; ++i
)
1456 if (find
== rates
[i
])
/*
 * intel_dp_max_link_rate() - highest link rate common to source and sink.
 * Builds the common-rate table and returns its last (largest) populated
 * entry; rate_to_index(0, rates) locates the first unused (zero) slot.
 * NOTE(review): the WARN_ON fallback return value is missing from this
 * extraction — confirm what is returned when no common rates exist.
 */
1463 intel_dp_max_link_rate(struct intel_dp
*intel_dp
)
1465 int rates
[DP_MAX_SUPPORTED_RATES
] = {};
1468 len
= intel_dp_common_rates(intel_dp
, rates
);
1469 if (WARN_ON(len
<= 0))
1472 return rates
[rate_to_index(0, rates
) - 1];
1475 int intel_dp_rate_select(struct intel_dp
*intel_dp
, int rate
)
1477 return rate_to_index(rate
, intel_dp
->sink_rates
);
/*
 * intel_dp_compute_rate() - encode @port_clock for the sink's link-rate
 * registers. Sinks with an explicit rate table get a rate-select index
 * (via intel_dp_rate_select()); legacy sinks get a link bandwidth code
 * (via drm_dp_link_rate_to_bw_code()).
 * NOTE(review): this extraction is missing the assignments that zero the
 * unused output (*link_bw or *rate_select) in each branch — confirm both
 * outputs are always written upstream.
 */
1480 void intel_dp_compute_rate(struct intel_dp
*intel_dp
, int port_clock
,
1481 uint8_t *link_bw
, uint8_t *rate_select
)
1483 if (intel_dp
->num_sink_rates
) {
1486 intel_dp_rate_select(intel_dp
, port_clock
);
1488 *link_bw
= drm_dp_link_rate_to_bw_code(port_clock
);
1494 intel_dp_compute_config(struct intel_encoder
*encoder
,
1495 struct intel_crtc_state
*pipe_config
)
1497 struct drm_device
*dev
= encoder
->base
.dev
;
1498 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1499 struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
1500 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1501 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1502 struct intel_crtc
*intel_crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
1503 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
1504 int lane_count
, clock
;
1505 int min_lane_count
= 1;
1506 int max_lane_count
= intel_dp_max_lane_count(intel_dp
);
1507 /* Conveniently, the link BW constants become indices with a shift...*/
1511 int link_avail
, link_clock
;
1512 int common_rates
[DP_MAX_SUPPORTED_RATES
] = {};
1514 uint8_t link_bw
, rate_select
;
1516 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1518 /* No common link rates between source and sink */
1519 WARN_ON(common_len
<= 0);
1521 max_clock
= common_len
- 1;
1523 if (HAS_PCH_SPLIT(dev
) && !HAS_DDI(dev
) && port
!= PORT_A
)
1524 pipe_config
->has_pch_encoder
= true;
1526 pipe_config
->has_dp_encoder
= true;
1527 pipe_config
->has_drrs
= false;
1528 pipe_config
->has_audio
= intel_dp
->has_audio
&& port
!= PORT_A
;
1530 if (is_edp(intel_dp
) && intel_connector
->panel
.fixed_mode
) {
1531 intel_fixed_panel_mode(intel_connector
->panel
.fixed_mode
,
1534 if (INTEL_INFO(dev
)->gen
>= 9) {
1536 ret
= skl_update_scaler_crtc(pipe_config
);
1541 if (HAS_GMCH_DISPLAY(dev
))
1542 intel_gmch_panel_fitting(intel_crtc
, pipe_config
,
1543 intel_connector
->panel
.fitting_mode
);
1545 intel_pch_panel_fitting(intel_crtc
, pipe_config
,
1546 intel_connector
->panel
.fitting_mode
);
1549 if (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
1552 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1553 "max bw %d pixel clock %iKHz\n",
1554 max_lane_count
, common_rates
[max_clock
],
1555 adjusted_mode
->crtc_clock
);
1557 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1558 * bpc in between. */
1559 bpp
= pipe_config
->pipe_bpp
;
1560 if (is_edp(intel_dp
)) {
1562 /* Get bpp from vbt only for panels that dont have bpp in edid */
1563 if (intel_connector
->base
.display_info
.bpc
== 0 &&
1564 (dev_priv
->vbt
.edp_bpp
&& dev_priv
->vbt
.edp_bpp
< bpp
)) {
1565 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1566 dev_priv
->vbt
.edp_bpp
);
1567 bpp
= dev_priv
->vbt
.edp_bpp
;
1571 * Use the maximum clock and number of lanes the eDP panel
1572 * advertizes being capable of. The panels are generally
1573 * designed to support only a single clock and lane
1574 * configuration, and typically these values correspond to the
1575 * native resolution of the panel.
1577 min_lane_count
= max_lane_count
;
1578 min_clock
= max_clock
;
1581 for (; bpp
>= 6*3; bpp
-= 2*3) {
1582 mode_rate
= intel_dp_link_required(adjusted_mode
->crtc_clock
,
1585 for (clock
= min_clock
; clock
<= max_clock
; clock
++) {
1586 for (lane_count
= min_lane_count
;
1587 lane_count
<= max_lane_count
;
1590 link_clock
= common_rates
[clock
];
1591 link_avail
= intel_dp_max_data_rate(link_clock
,
1594 if (mode_rate
<= link_avail
) {
1604 if (intel_dp
->color_range_auto
) {
1607 * CEA-861-E - 5.1 Default Encoding Parameters
1608 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1610 pipe_config
->limited_color_range
=
1611 bpp
!= 18 && drm_match_cea_mode(adjusted_mode
) > 1;
1613 pipe_config
->limited_color_range
=
1614 intel_dp
->limited_color_range
;
1617 pipe_config
->lane_count
= lane_count
;
1619 pipe_config
->pipe_bpp
= bpp
;
1620 pipe_config
->port_clock
= common_rates
[clock
];
1622 intel_dp_compute_rate(intel_dp
, pipe_config
->port_clock
,
1623 &link_bw
, &rate_select
);
1625 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1626 link_bw
, rate_select
, pipe_config
->lane_count
,
1627 pipe_config
->port_clock
, bpp
);
1628 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1629 mode_rate
, link_avail
);
1631 intel_link_compute_m_n(bpp
, lane_count
,
1632 adjusted_mode
->crtc_clock
,
1633 pipe_config
->port_clock
,
1634 &pipe_config
->dp_m_n
);
1636 if (intel_connector
->panel
.downclock_mode
!= NULL
&&
1637 dev_priv
->drrs
.type
== SEAMLESS_DRRS_SUPPORT
) {
1638 pipe_config
->has_drrs
= true;
1639 intel_link_compute_m_n(bpp
, lane_count
,
1640 intel_connector
->panel
.downclock_mode
->clock
,
1641 pipe_config
->port_clock
,
1642 &pipe_config
->dp_m2_n2
);
1645 if ((IS_SKYLAKE(dev
) || IS_KABYLAKE(dev
)) && is_edp(intel_dp
))
1646 skl_edp_set_pll_config(pipe_config
);
1647 else if (IS_BROXTON(dev
))
1648 /* handled in ddi */;
1649 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1650 hsw_dp_set_ddi_pll_sel(pipe_config
);
1652 intel_dp_set_clock(encoder
, pipe_config
);
1657 void intel_dp_set_link_params(struct intel_dp
*intel_dp
,
1658 const struct intel_crtc_state
*pipe_config
)
1660 intel_dp
->link_rate
= pipe_config
->port_clock
;
1661 intel_dp
->lane_count
= pipe_config
->lane_count
;
1664 static void intel_dp_prepare(struct intel_encoder
*encoder
)
1666 struct drm_device
*dev
= encoder
->base
.dev
;
1667 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1668 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1669 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1670 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
1671 const struct drm_display_mode
*adjusted_mode
= &crtc
->config
->base
.adjusted_mode
;
1673 intel_dp_set_link_params(intel_dp
, crtc
->config
);
1676 * There are four kinds of DP registers:
1683 * IBX PCH and CPU are the same for almost everything,
1684 * except that the CPU DP PLL is configured in this
1687 * CPT PCH is quite different, having many bits moved
1688 * to the TRANS_DP_CTL register instead. That
1689 * configuration happens (oddly) in ironlake_pch_enable
1692 /* Preserve the BIOS-computed detected bit. This is
1693 * supposed to be read-only.
1695 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
1697 /* Handle DP bits in common between all three register formats */
1698 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
1699 intel_dp
->DP
|= DP_PORT_WIDTH(crtc
->config
->lane_count
);
1701 /* Split out the IBX/CPU vs CPT settings */
1703 if (IS_GEN7(dev
) && port
== PORT_A
) {
1704 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1705 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1706 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1707 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1708 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1710 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1711 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1713 intel_dp
->DP
|= crtc
->pipe
<< 29;
1714 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
1717 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1719 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
1720 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1721 trans_dp
|= TRANS_DP_ENH_FRAMING
;
1723 trans_dp
&= ~TRANS_DP_ENH_FRAMING
;
1724 I915_WRITE(TRANS_DP_CTL(crtc
->pipe
), trans_dp
);
1726 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
1727 crtc
->config
->limited_color_range
)
1728 intel_dp
->DP
|= DP_COLOR_RANGE_16_235
;
1730 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1731 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1732 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1733 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1734 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
1736 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1737 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1739 if (IS_CHERRYVIEW(dev
))
1740 intel_dp
->DP
|= DP_PIPE_SELECT_CHV(crtc
->pipe
);
1741 else if (crtc
->pipe
== PIPE_B
)
1742 intel_dp
->DP
|= DP_PIPEB_SELECT
;
1746 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1747 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1749 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1750 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1752 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1753 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1755 static void wait_panel_status(struct intel_dp
*intel_dp
,
1759 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1760 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1761 u32 pp_stat_reg
, pp_ctrl_reg
;
1763 lockdep_assert_held(&dev_priv
->pps_mutex
);
1765 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1766 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1768 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1770 I915_READ(pp_stat_reg
),
1771 I915_READ(pp_ctrl_reg
));
1773 if (_wait_for((I915_READ(pp_stat_reg
) & mask
) == value
, 5000, 10)) {
1774 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1775 I915_READ(pp_stat_reg
),
1776 I915_READ(pp_ctrl_reg
));
1779 DRM_DEBUG_KMS("Wait complete\n");
1782 static void wait_panel_on(struct intel_dp
*intel_dp
)
1784 DRM_DEBUG_KMS("Wait for panel power on\n");
1785 wait_panel_status(intel_dp
, IDLE_ON_MASK
, IDLE_ON_VALUE
);
1788 static void wait_panel_off(struct intel_dp
*intel_dp
)
1790 DRM_DEBUG_KMS("Wait for panel power off time\n");
1791 wait_panel_status(intel_dp
, IDLE_OFF_MASK
, IDLE_OFF_VALUE
);
1794 static void wait_panel_power_cycle(struct intel_dp
*intel_dp
)
1796 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1798 /* When we disable the VDD override bit last we have to do the manual
1800 wait_remaining_ms_from_jiffies(intel_dp
->last_power_cycle
,
1801 intel_dp
->panel_power_cycle_delay
);
1803 wait_panel_status(intel_dp
, IDLE_CYCLE_MASK
, IDLE_CYCLE_VALUE
);
1806 static void wait_backlight_on(struct intel_dp
*intel_dp
)
1808 wait_remaining_ms_from_jiffies(intel_dp
->last_power_on
,
1809 intel_dp
->backlight_on_delay
);
1812 static void edp_wait_backlight_off(struct intel_dp
*intel_dp
)
1814 wait_remaining_ms_from_jiffies(intel_dp
->last_backlight_off
,
1815 intel_dp
->backlight_off_delay
);
1818 /* Read the current pp_control value, unlocking the register if it
1822 static u32
ironlake_get_pp_control(struct intel_dp
*intel_dp
)
1824 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1825 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1828 lockdep_assert_held(&dev_priv
->pps_mutex
);
1830 control
= I915_READ(_pp_ctrl_reg(intel_dp
));
1831 if (!IS_BROXTON(dev
)) {
1832 control
&= ~PANEL_UNLOCK_MASK
;
1833 control
|= PANEL_UNLOCK_REGS
;
1839 * Must be paired with edp_panel_vdd_off().
1840 * Must hold pps_mutex around the whole on/off sequence.
1841 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1843 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1845 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1846 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1847 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1848 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1849 enum intel_display_power_domain power_domain
;
1851 u32 pp_stat_reg
, pp_ctrl_reg
;
1852 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
1854 lockdep_assert_held(&dev_priv
->pps_mutex
);
1856 if (!is_edp(intel_dp
))
1859 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
1860 intel_dp
->want_panel_vdd
= true;
1862 if (edp_have_panel_vdd(intel_dp
))
1863 return need_to_disable
;
1865 power_domain
= intel_display_port_power_domain(intel_encoder
);
1866 intel_display_power_get(dev_priv
, power_domain
);
1868 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1869 port_name(intel_dig_port
->port
));
1871 if (!edp_have_panel_power(intel_dp
))
1872 wait_panel_power_cycle(intel_dp
);
1874 pp
= ironlake_get_pp_control(intel_dp
);
1875 pp
|= EDP_FORCE_VDD
;
1877 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1878 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1880 I915_WRITE(pp_ctrl_reg
, pp
);
1881 POSTING_READ(pp_ctrl_reg
);
1882 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1883 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1885 * If the panel wasn't on, delay before accessing aux channel
1887 if (!edp_have_panel_power(intel_dp
)) {
1888 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1889 port_name(intel_dig_port
->port
));
1890 msleep(intel_dp
->panel_power_up_delay
);
1893 return need_to_disable
;
1897 * Must be paired with intel_edp_panel_vdd_off() or
1898 * intel_edp_panel_off().
1899 * Nested calls to these functions are not allowed since
1900 * we drop the lock. Caller must use some higher level
1901 * locking to prevent nested calls from other threads.
/*
 * intel_edp_panel_vdd_on() - public wrapper that forces panel VDD on.
 * No-op on non-eDP ports. Calls edp_panel_vdd_on() and warns if VDD was
 * already requested on (edp_panel_vdd_on() returned false).
 * NOTE(review): the pps_lock() call and the local bool declaration are
 * missing from this extraction; only pps_unlock() is visible — the lock
 * pairing must be confirmed against the original file.
 */
1903 void intel_edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1907 if (!is_edp(intel_dp
))
1911 vdd
= edp_panel_vdd_on(intel_dp
);
1912 pps_unlock(intel_dp
);
1914 I915_STATE_WARN(!vdd
, "eDP port %c VDD already requested on\n",
1915 port_name(dp_to_dig_port(intel_dp
)->port
));
1918 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
1920 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1921 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1922 struct intel_digital_port
*intel_dig_port
=
1923 dp_to_dig_port(intel_dp
);
1924 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1925 enum intel_display_power_domain power_domain
;
1927 u32 pp_stat_reg
, pp_ctrl_reg
;
1929 lockdep_assert_held(&dev_priv
->pps_mutex
);
1931 WARN_ON(intel_dp
->want_panel_vdd
);
1933 if (!edp_have_panel_vdd(intel_dp
))
1936 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1937 port_name(intel_dig_port
->port
));
1939 pp
= ironlake_get_pp_control(intel_dp
);
1940 pp
&= ~EDP_FORCE_VDD
;
1942 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1943 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1945 I915_WRITE(pp_ctrl_reg
, pp
);
1946 POSTING_READ(pp_ctrl_reg
);
1948 /* Make sure sequencer is idle before allowing subsequent activity */
1949 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1950 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1952 if ((pp
& POWER_TARGET_ON
) == 0)
1953 intel_dp
->last_power_cycle
= jiffies
;
1955 power_domain
= intel_display_port_power_domain(intel_encoder
);
1956 intel_display_power_put(dev_priv
, power_domain
);
/*
 * edp_panel_vdd_work() - delayed-work handler that drops panel VDD.
 * Recovers the owning intel_dp from the work item and, if nobody still
 * wants VDD on, turns it off synchronously; then releases the PPS lock.
 * NOTE(review): the matching pps_lock() call is missing from this
 * extraction (only pps_unlock() is visible) — confirm lock pairing
 * against the original file.
 */
1959 static void edp_panel_vdd_work(struct work_struct
*__work
)
1961 struct intel_dp
*intel_dp
= container_of(to_delayed_work(__work
),
1962 struct intel_dp
, panel_vdd_work
);
1965 if (!intel_dp
->want_panel_vdd
)
1966 edp_panel_vdd_off_sync(intel_dp
);
1967 pps_unlock(intel_dp
);
1970 static void edp_panel_vdd_schedule_off(struct intel_dp
*intel_dp
)
1972 unsigned long delay
;
1975 * Queue the timer to fire a long time from now (relative to the power
1976 * down delay) to keep the panel power up across a sequence of
1979 delay
= msecs_to_jiffies(intel_dp
->panel_power_cycle_delay
* 5);
1980 schedule_delayed_work(&intel_dp
->panel_vdd_work
, delay
);
1984 * Must be paired with edp_panel_vdd_on().
1985 * Must hold pps_mutex around the whole on/off sequence.
1986 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1988 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
)
1990 struct drm_i915_private
*dev_priv
=
1991 intel_dp_to_dev(intel_dp
)->dev_private
;
1993 lockdep_assert_held(&dev_priv
->pps_mutex
);
1995 if (!is_edp(intel_dp
))
1998 I915_STATE_WARN(!intel_dp
->want_panel_vdd
, "eDP port %c VDD not forced on",
1999 port_name(dp_to_dig_port(intel_dp
)->port
));
2001 intel_dp
->want_panel_vdd
= false;
2004 edp_panel_vdd_off_sync(intel_dp
);
2006 edp_panel_vdd_schedule_off(intel_dp
);
2009 static void edp_panel_on(struct intel_dp
*intel_dp
)
2011 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2012 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2016 lockdep_assert_held(&dev_priv
->pps_mutex
);
2018 if (!is_edp(intel_dp
))
2021 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2022 port_name(dp_to_dig_port(intel_dp
)->port
));
2024 if (WARN(edp_have_panel_power(intel_dp
),
2025 "eDP port %c panel power already on\n",
2026 port_name(dp_to_dig_port(intel_dp
)->port
)))
2029 wait_panel_power_cycle(intel_dp
);
2031 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2032 pp
= ironlake_get_pp_control(intel_dp
);
2034 /* ILK workaround: disable reset around power sequence */
2035 pp
&= ~PANEL_POWER_RESET
;
2036 I915_WRITE(pp_ctrl_reg
, pp
);
2037 POSTING_READ(pp_ctrl_reg
);
2040 pp
|= POWER_TARGET_ON
;
2042 pp
|= PANEL_POWER_RESET
;
2044 I915_WRITE(pp_ctrl_reg
, pp
);
2045 POSTING_READ(pp_ctrl_reg
);
2047 wait_panel_on(intel_dp
);
2048 intel_dp
->last_power_on
= jiffies
;
2051 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
2052 I915_WRITE(pp_ctrl_reg
, pp
);
2053 POSTING_READ(pp_ctrl_reg
);
/*
 * intel_edp_panel_on() - public wrapper around edp_panel_on().
 * No-op on non-eDP ports.
 * NOTE(review): the guard's return and the pps_lock() call are missing
 * from this extraction (only pps_unlock() is visible) — confirm against
 * the original file.
 */
2057 void intel_edp_panel_on(struct intel_dp
*intel_dp
)
2059 if (!is_edp(intel_dp
))
2063 edp_panel_on(intel_dp
);
2064 pps_unlock(intel_dp
);
2068 static void edp_panel_off(struct intel_dp
*intel_dp
)
2070 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2071 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
2072 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2073 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2074 enum intel_display_power_domain power_domain
;
2078 lockdep_assert_held(&dev_priv
->pps_mutex
);
2080 if (!is_edp(intel_dp
))
2083 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2084 port_name(dp_to_dig_port(intel_dp
)->port
));
2086 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
2087 port_name(dp_to_dig_port(intel_dp
)->port
));
2089 pp
= ironlake_get_pp_control(intel_dp
);
2090 /* We need to switch off panel power _and_ force vdd, for otherwise some
2091 * panels get very unhappy and cease to work. */
2092 pp
&= ~(POWER_TARGET_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
2095 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2097 intel_dp
->want_panel_vdd
= false;
2099 I915_WRITE(pp_ctrl_reg
, pp
);
2100 POSTING_READ(pp_ctrl_reg
);
2102 intel_dp
->last_power_cycle
= jiffies
;
2103 wait_panel_off(intel_dp
);
2105 /* We got a reference when we enabled the VDD. */
2106 power_domain
= intel_display_port_power_domain(intel_encoder
);
2107 intel_display_power_put(dev_priv
, power_domain
);
/*
 * intel_edp_panel_off() - public wrapper around edp_panel_off().
 * No-op on non-eDP ports.
 * NOTE(review): the guard's return and the pps_lock() call are missing
 * from this extraction (only pps_unlock() is visible) — confirm against
 * the original file.
 */
2110 void intel_edp_panel_off(struct intel_dp
*intel_dp
)
2112 if (!is_edp(intel_dp
))
2116 edp_panel_off(intel_dp
);
2117 pps_unlock(intel_dp
);
2120 /* Enable backlight in the panel power control. */
2121 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2123 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2124 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2125 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2130 * If we enable the backlight right away following a panel power
2131 * on, we may see slight flicker as the panel syncs with the eDP
2132 * link. So delay a bit to make sure the image is solid before
2133 * allowing it to appear.
2135 wait_backlight_on(intel_dp
);
2139 pp
= ironlake_get_pp_control(intel_dp
);
2140 pp
|= EDP_BLC_ENABLE
;
2142 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2144 I915_WRITE(pp_ctrl_reg
, pp
);
2145 POSTING_READ(pp_ctrl_reg
);
2147 pps_unlock(intel_dp
);
2150 /* Enable backlight PWM and backlight PP control. */
/*
 * intel_edp_backlight_on() - enable backlight PWM, then the PP-control
 * backlight bit. No-op on non-eDP ports.
 * NOTE(review): the guard's return statement is missing from this
 * extraction — confirm against the original file.
 */
2151 void intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2153 if (!is_edp(intel_dp
))
2156 DRM_DEBUG_KMS("\n");
2158 intel_panel_enable_backlight(intel_dp
->attached_connector
);
2159 _intel_edp_backlight_on(intel_dp
);
2162 /* Disable backlight in the panel power control. */
2163 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2165 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2166 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2170 if (!is_edp(intel_dp
))
2175 pp
= ironlake_get_pp_control(intel_dp
);
2176 pp
&= ~EDP_BLC_ENABLE
;
2178 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2180 I915_WRITE(pp_ctrl_reg
, pp
);
2181 POSTING_READ(pp_ctrl_reg
);
2183 pps_unlock(intel_dp
);
2185 intel_dp
->last_backlight_off
= jiffies
;
2186 edp_wait_backlight_off(intel_dp
);
2189 /* Disable backlight PP control and backlight PWM. */
/*
 * intel_edp_backlight_off() - disable the PP-control backlight bit, then
 * the backlight PWM (reverse order of intel_edp_backlight_on()).
 * No-op on non-eDP ports.
 * NOTE(review): the guard's return statement is missing from this
 * extraction — confirm against the original file.
 */
2190 void intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2192 if (!is_edp(intel_dp
))
2195 DRM_DEBUG_KMS("\n");
2197 _intel_edp_backlight_off(intel_dp
);
2198 intel_panel_disable_backlight(intel_dp
->attached_connector
);
2202 * Hook for controlling the panel power control backlight through the bl_power
2203 * sysfs attribute. Take care to handle multiple calls.
2205 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2208 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
2212 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2213 pps_unlock(intel_dp
);
2215 if (is_enabled
== enable
)
2218 DRM_DEBUG_KMS("panel power control backlight %s\n",
2219 enable
? "enable" : "disable");
2222 _intel_edp_backlight_on(intel_dp
);
2224 _intel_edp_backlight_off(intel_dp
);
/* Map a boolean state to its human-readable form for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
2232 static void assert_dp_port(struct intel_dp
*intel_dp
, bool state
)
2234 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
2235 struct drm_i915_private
*dev_priv
= to_i915(dig_port
->base
.base
.dev
);
2236 bool cur_state
= I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
;
2238 I915_STATE_WARN(cur_state
!= state
,
2239 "DP port %c state assertion failure (expected %s, current %s)\n",
2240 port_name(dig_port
->port
),
2241 state_string(state
), state_string(cur_state
));
2243 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2245 static void assert_edp_pll(struct drm_i915_private
*dev_priv
, bool state
)
2247 bool cur_state
= I915_READ(DP_A
) & DP_PLL_ENABLE
;
2249 I915_STATE_WARN(cur_state
!= state
,
2250 "eDP PLL state assertion failure (expected %s, current %s)\n",
2251 state_string(state
), state_string(cur_state
));
2253 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2254 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2256 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
)
2258 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2259 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
2260 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2262 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2263 assert_dp_port_disabled(intel_dp
);
2264 assert_edp_pll_disabled(dev_priv
);
2266 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2267 crtc
->config
->port_clock
);
2269 intel_dp
->DP
&= ~DP_PLL_FREQ_MASK
;
2271 if (crtc
->config
->port_clock
== 162000)
2272 intel_dp
->DP
|= DP_PLL_FREQ_162MHZ
;
2274 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
2276 I915_WRITE(DP_A
, intel_dp
->DP
);
2280 intel_dp
->DP
|= DP_PLL_ENABLE
;
2282 I915_WRITE(DP_A
, intel_dp
->DP
);
2287 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
)
2289 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2290 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
2291 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2293 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2294 assert_dp_port_disabled(intel_dp
);
2295 assert_edp_pll_enabled(dev_priv
);
2297 DRM_DEBUG_KMS("disabling eDP PLL\n");
2299 intel_dp
->DP
&= ~DP_PLL_ENABLE
;
2301 I915_WRITE(DP_A
, intel_dp
->DP
);
2306 /* If the sink supports it, try to set the power state appropriately */
2307 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2311 /* Should have a valid DPCD by this point */
2312 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2315 if (mode
!= DRM_MODE_DPMS_ON
) {
2316 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2320 * When turning on, we need to retry for 1ms to give the sink
2323 for (i
= 0; i
< 3; i
++) {
2324 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2333 DRM_DEBUG_KMS("failed to %s sink power state\n",
2334 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
2337 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
2340 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2341 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2342 struct drm_device
*dev
= encoder
->base
.dev
;
2343 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2344 enum intel_display_power_domain power_domain
;
2347 power_domain
= intel_display_port_power_domain(encoder
);
2348 if (!intel_display_power_is_enabled(dev_priv
, power_domain
))
2351 tmp
= I915_READ(intel_dp
->output_reg
);
2353 if (!(tmp
& DP_PORT_EN
))
2356 if (IS_GEN7(dev
) && port
== PORT_A
) {
2357 *pipe
= PORT_TO_PIPE_CPT(tmp
);
2358 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2361 for_each_pipe(dev_priv
, p
) {
2362 u32 trans_dp
= I915_READ(TRANS_DP_CTL(p
));
2363 if (TRANS_DP_PIPE_TO_PORT(trans_dp
) == port
) {
2369 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2370 intel_dp
->output_reg
);
2371 } else if (IS_CHERRYVIEW(dev
)) {
2372 *pipe
= DP_PORT_TO_PIPE_CHV(tmp
);
2374 *pipe
= PORT_TO_PIPE(tmp
);
2380 static void intel_dp_get_config(struct intel_encoder
*encoder
,
2381 struct intel_crtc_state
*pipe_config
)
2383 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2385 struct drm_device
*dev
= encoder
->base
.dev
;
2386 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2387 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2388 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2391 tmp
= I915_READ(intel_dp
->output_reg
);
2393 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
2395 if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2396 u32 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2398 if (trans_dp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
2399 flags
|= DRM_MODE_FLAG_PHSYNC
;
2401 flags
|= DRM_MODE_FLAG_NHSYNC
;
2403 if (trans_dp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
2404 flags
|= DRM_MODE_FLAG_PVSYNC
;
2406 flags
|= DRM_MODE_FLAG_NVSYNC
;
2408 if (tmp
& DP_SYNC_HS_HIGH
)
2409 flags
|= DRM_MODE_FLAG_PHSYNC
;
2411 flags
|= DRM_MODE_FLAG_NHSYNC
;
2413 if (tmp
& DP_SYNC_VS_HIGH
)
2414 flags
|= DRM_MODE_FLAG_PVSYNC
;
2416 flags
|= DRM_MODE_FLAG_NVSYNC
;
2419 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
2421 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
2422 tmp
& DP_COLOR_RANGE_16_235
)
2423 pipe_config
->limited_color_range
= true;
2425 pipe_config
->has_dp_encoder
= true;
2427 pipe_config
->lane_count
=
2428 ((tmp
& DP_PORT_WIDTH_MASK
) >> DP_PORT_WIDTH_SHIFT
) + 1;
2430 intel_dp_get_m_n(crtc
, pipe_config
);
2432 if (port
== PORT_A
) {
2433 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_162MHZ
)
2434 pipe_config
->port_clock
= 162000;
2436 pipe_config
->port_clock
= 270000;
2439 dotclock
= intel_dotclock_calculate(pipe_config
->port_clock
,
2440 &pipe_config
->dp_m_n
);
2442 if (HAS_PCH_SPLIT(dev_priv
->dev
) && port
!= PORT_A
)
2443 ironlake_check_encoder_dotclock(pipe_config
, dotclock
);
2445 pipe_config
->base
.adjusted_mode
.crtc_clock
= dotclock
;
2447 if (is_edp(intel_dp
) && dev_priv
->vbt
.edp_bpp
&&
2448 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp_bpp
) {
2450 * This is a big fat ugly hack.
2452 * Some machines in UEFI boot mode provide us a VBT that has 18
2453 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2454 * unknown we fail to light up. Yet the same BIOS boots up with
2455 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2456 * max, not what it tells us to use.
2458 * Note: This will still be broken if the eDP panel is not lit
2459 * up by the BIOS, and thus we can't get the mode at module
2462 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2463 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp_bpp
);
2464 dev_priv
->vbt
.edp_bpp
= pipe_config
->pipe_bpp
;
2468 static void intel_disable_dp(struct intel_encoder
*encoder
)
2470 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2471 struct drm_device
*dev
= encoder
->base
.dev
;
2472 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2474 if (crtc
->config
->has_audio
)
2475 intel_audio_codec_disable(encoder
);
2477 if (HAS_PSR(dev
) && !HAS_DDI(dev
))
2478 intel_psr_disable(intel_dp
);
2480 /* Make sure the panel is off before trying to change the mode. But also
2481 * ensure that we have vdd while we switch off the panel. */
2482 intel_edp_panel_vdd_on(intel_dp
);
2483 intel_edp_backlight_off(intel_dp
);
2484 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_OFF
);
2485 intel_edp_panel_off(intel_dp
);
2487 /* disable the port before the pipe on g4x */
2488 if (INTEL_INFO(dev
)->gen
< 5)
2489 intel_dp_link_down(intel_dp
);
2492 static void ilk_post_disable_dp(struct intel_encoder
*encoder
)
2494 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2495 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2497 intel_dp_link_down(intel_dp
);
2499 /* Only ilk+ has port A */
2501 ironlake_edp_pll_off(intel_dp
);
2504 static void vlv_post_disable_dp(struct intel_encoder
*encoder
)
2506 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2508 intel_dp_link_down(intel_dp
);
2511 static void chv_data_lane_soft_reset(struct intel_encoder
*encoder
,
2514 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
2515 enum dpio_channel ch
= vlv_dport_to_channel(enc_to_dig_port(&encoder
->base
));
2516 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2517 enum pipe pipe
= crtc
->pipe
;
2520 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW0(ch
));
2522 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2524 val
|= DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
;
2525 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW0(ch
), val
);
2527 if (crtc
->config
->lane_count
> 2) {
2528 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW0(ch
));
2530 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2532 val
|= DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
;
2533 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW0(ch
), val
);
2536 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW1(ch
));
2537 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2539 val
&= ~DPIO_PCS_CLK_SOFT_RESET
;
2541 val
|= DPIO_PCS_CLK_SOFT_RESET
;
2542 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW1(ch
), val
);
2544 if (crtc
->config
->lane_count
> 2) {
2545 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW1(ch
));
2546 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2548 val
&= ~DPIO_PCS_CLK_SOFT_RESET
;
2550 val
|= DPIO_PCS_CLK_SOFT_RESET
;
2551 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW1(ch
), val
);
2555 static void chv_post_disable_dp(struct intel_encoder
*encoder
)
2557 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2558 struct drm_device
*dev
= encoder
->base
.dev
;
2559 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2561 intel_dp_link_down(intel_dp
);
2563 mutex_lock(&dev_priv
->sb_lock
);
2565 /* Assert data lane reset */
2566 chv_data_lane_soft_reset(encoder
, true);
2568 mutex_unlock(&dev_priv
->sb_lock
);
2572 _intel_dp_set_link_train(struct intel_dp
*intel_dp
,
2574 uint8_t dp_train_pat
)
2576 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2577 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2578 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2579 enum port port
= intel_dig_port
->port
;
2582 uint32_t temp
= I915_READ(DP_TP_CTL(port
));
2584 if (dp_train_pat
& DP_LINK_SCRAMBLING_DISABLE
)
2585 temp
|= DP_TP_CTL_SCRAMBLE_DISABLE
;
2587 temp
&= ~DP_TP_CTL_SCRAMBLE_DISABLE
;
2589 temp
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
2590 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2591 case DP_TRAINING_PATTERN_DISABLE
:
2592 temp
|= DP_TP_CTL_LINK_TRAIN_NORMAL
;
2595 case DP_TRAINING_PATTERN_1
:
2596 temp
|= DP_TP_CTL_LINK_TRAIN_PAT1
;
2598 case DP_TRAINING_PATTERN_2
:
2599 temp
|= DP_TP_CTL_LINK_TRAIN_PAT2
;
2601 case DP_TRAINING_PATTERN_3
:
2602 temp
|= DP_TP_CTL_LINK_TRAIN_PAT3
;
2605 I915_WRITE(DP_TP_CTL(port
), temp
);
2607 } else if ((IS_GEN7(dev
) && port
== PORT_A
) ||
2608 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
2609 *DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
2611 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2612 case DP_TRAINING_PATTERN_DISABLE
:
2613 *DP
|= DP_LINK_TRAIN_OFF_CPT
;
2615 case DP_TRAINING_PATTERN_1
:
2616 *DP
|= DP_LINK_TRAIN_PAT_1_CPT
;
2618 case DP_TRAINING_PATTERN_2
:
2619 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2621 case DP_TRAINING_PATTERN_3
:
2622 DRM_ERROR("DP training pattern 3 not supported\n");
2623 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2628 if (IS_CHERRYVIEW(dev
))
2629 *DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
2631 *DP
&= ~DP_LINK_TRAIN_MASK
;
2633 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2634 case DP_TRAINING_PATTERN_DISABLE
:
2635 *DP
|= DP_LINK_TRAIN_OFF
;
2637 case DP_TRAINING_PATTERN_1
:
2638 *DP
|= DP_LINK_TRAIN_PAT_1
;
2640 case DP_TRAINING_PATTERN_2
:
2641 *DP
|= DP_LINK_TRAIN_PAT_2
;
2643 case DP_TRAINING_PATTERN_3
:
2644 if (IS_CHERRYVIEW(dev
)) {
2645 *DP
|= DP_LINK_TRAIN_PAT_3_CHV
;
2647 DRM_ERROR("DP training pattern 3 not supported\n");
2648 *DP
|= DP_LINK_TRAIN_PAT_2
;
2655 static void intel_dp_enable_port(struct intel_dp
*intel_dp
)
2657 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2658 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2659 struct intel_crtc
*crtc
=
2660 to_intel_crtc(dp_to_dig_port(intel_dp
)->base
.base
.crtc
);
2662 /* enable with pattern 1 (as per spec) */
2663 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
2664 DP_TRAINING_PATTERN_1
);
2666 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2667 POSTING_READ(intel_dp
->output_reg
);
2670 * Magic for VLV/CHV. We _must_ first set up the register
2671 * without actually enabling the port, and then do another
2672 * write to enable the port. Otherwise link training will
2673 * fail when the power sequencer is freshly used for this port.
2675 intel_dp
->DP
|= DP_PORT_EN
;
2676 if (crtc
->config
->has_audio
)
2677 intel_dp
->DP
|= DP_AUDIO_OUTPUT_ENABLE
;
2679 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2680 POSTING_READ(intel_dp
->output_reg
);
2683 static void intel_enable_dp(struct intel_encoder
*encoder
)
2685 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2686 struct drm_device
*dev
= encoder
->base
.dev
;
2687 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2688 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2689 uint32_t dp_reg
= I915_READ(intel_dp
->output_reg
);
2690 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2691 enum pipe pipe
= crtc
->pipe
;
2693 if (WARN_ON(dp_reg
& DP_PORT_EN
))
2698 if (IS_VALLEYVIEW(dev
))
2699 vlv_init_panel_power_sequencer(intel_dp
);
2701 intel_dp_enable_port(intel_dp
);
2703 if (port
== PORT_A
&& IS_GEN5(dev_priv
)) {
2705 * Underrun reporting for the other pipe was disabled in
2706 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2707 * enabled, so it's now safe to re-enable underrun reporting.
2709 intel_wait_for_vblank_if_active(dev_priv
->dev
, !pipe
);
2710 intel_set_cpu_fifo_underrun_reporting(dev_priv
, !pipe
, true);
2711 intel_set_pch_fifo_underrun_reporting(dev_priv
, !pipe
, true);
2714 edp_panel_vdd_on(intel_dp
);
2715 edp_panel_on(intel_dp
);
2716 edp_panel_vdd_off(intel_dp
, true);
2718 pps_unlock(intel_dp
);
2720 if (IS_VALLEYVIEW(dev
)) {
2721 unsigned int lane_mask
= 0x0;
2723 if (IS_CHERRYVIEW(dev
))
2724 lane_mask
= intel_dp_unused_lane_mask(crtc
->config
->lane_count
);
2726 vlv_wait_port_ready(dev_priv
, dp_to_dig_port(intel_dp
),
2730 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_ON
);
2731 intel_dp_start_link_train(intel_dp
);
2732 intel_dp_stop_link_train(intel_dp
);
2734 if (crtc
->config
->has_audio
) {
2735 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2737 intel_audio_codec_enable(encoder
);
2741 static void g4x_enable_dp(struct intel_encoder
*encoder
)
2743 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2745 intel_enable_dp(encoder
);
2746 intel_edp_backlight_on(intel_dp
);
2749 static void vlv_enable_dp(struct intel_encoder
*encoder
)
2751 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2753 intel_edp_backlight_on(intel_dp
);
2754 intel_psr_enable(intel_dp
);
2757 static void g4x_pre_enable_dp(struct intel_encoder
*encoder
)
2759 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
2760 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2761 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2762 enum pipe pipe
= to_intel_crtc(encoder
->base
.crtc
)->pipe
;
2764 intel_dp_prepare(encoder
);
2766 if (port
== PORT_A
&& IS_GEN5(dev_priv
)) {
2768 * We get FIFO underruns on the other pipe when
2769 * enabling the CPU eDP PLL, and when enabling CPU
2770 * eDP port. We could potentially avoid the PLL
2771 * underrun with a vblank wait just prior to enabling
2772 * the PLL, but that doesn't appear to help the port
2773 * enable case. Just sweep it all under the rug.
2775 intel_set_cpu_fifo_underrun_reporting(dev_priv
, !pipe
, false);
2776 intel_set_pch_fifo_underrun_reporting(dev_priv
, !pipe
, false);
2779 /* Only ilk+ has port A */
2781 ironlake_edp_pll_on(intel_dp
);
2784 static void vlv_detach_power_sequencer(struct intel_dp
*intel_dp
)
2786 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2787 struct drm_i915_private
*dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
2788 enum pipe pipe
= intel_dp
->pps_pipe
;
2789 int pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
2791 edp_panel_vdd_off_sync(intel_dp
);
2794 * VLV seems to get confused when multiple power seqeuencers
2795 * have the same port selected (even if only one has power/vdd
2796 * enabled). The failure manifests as vlv_wait_port_ready() failing
2797 * CHV on the other hand doesn't seem to mind having the same port
2798 * selected in multiple power seqeuencers, but let's clear the
2799 * port select always when logically disconnecting a power sequencer
2802 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2803 pipe_name(pipe
), port_name(intel_dig_port
->port
));
2804 I915_WRITE(pp_on_reg
, 0);
2805 POSTING_READ(pp_on_reg
);
2807 intel_dp
->pps_pipe
= INVALID_PIPE
;
2810 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
2813 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2814 struct intel_encoder
*encoder
;
2816 lockdep_assert_held(&dev_priv
->pps_mutex
);
2818 if (WARN_ON(pipe
!= PIPE_A
&& pipe
!= PIPE_B
))
2821 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
2823 struct intel_dp
*intel_dp
;
2826 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
2829 intel_dp
= enc_to_intel_dp(&encoder
->base
);
2830 port
= dp_to_dig_port(intel_dp
)->port
;
2832 if (intel_dp
->pps_pipe
!= pipe
)
2835 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2836 pipe_name(pipe
), port_name(port
));
2838 WARN(encoder
->base
.crtc
,
2839 "stealing pipe %c power sequencer from active eDP port %c\n",
2840 pipe_name(pipe
), port_name(port
));
2842 /* make sure vdd is off before we steal it */
2843 vlv_detach_power_sequencer(intel_dp
);
2847 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
)
2849 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2850 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
2851 struct drm_device
*dev
= encoder
->base
.dev
;
2852 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2853 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2855 lockdep_assert_held(&dev_priv
->pps_mutex
);
2857 if (!is_edp(intel_dp
))
2860 if (intel_dp
->pps_pipe
== crtc
->pipe
)
2864 * If another power sequencer was being used on this
2865 * port previously make sure to turn off vdd there while
2866 * we still have control of it.
2868 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
2869 vlv_detach_power_sequencer(intel_dp
);
2872 * We may be stealing the power
2873 * sequencer from another port.
2875 vlv_steal_power_sequencer(dev
, crtc
->pipe
);
2877 /* now it's all ours */
2878 intel_dp
->pps_pipe
= crtc
->pipe
;
2880 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2881 pipe_name(intel_dp
->pps_pipe
), port_name(intel_dig_port
->port
));
2883 /* init power sequencer on this pipe and port */
2884 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
2885 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
2888 static void vlv_pre_enable_dp(struct intel_encoder
*encoder
)
2890 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2891 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2892 struct drm_device
*dev
= encoder
->base
.dev
;
2893 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2894 struct intel_crtc
*intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
2895 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2896 int pipe
= intel_crtc
->pipe
;
2899 mutex_lock(&dev_priv
->sb_lock
);
2901 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(port
));
2908 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW8(port
), val
);
2909 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW14(port
), 0x00760018);
2910 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW23(port
), 0x00400888);
2912 mutex_unlock(&dev_priv
->sb_lock
);
2914 intel_enable_dp(encoder
);
2917 static void vlv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
2919 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
2920 struct drm_device
*dev
= encoder
->base
.dev
;
2921 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2922 struct intel_crtc
*intel_crtc
=
2923 to_intel_crtc(encoder
->base
.crtc
);
2924 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2925 int pipe
= intel_crtc
->pipe
;
2927 intel_dp_prepare(encoder
);
2929 /* Program Tx lane resets to default */
2930 mutex_lock(&dev_priv
->sb_lock
);
2931 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW0(port
),
2932 DPIO_PCS_TX_LANE2_RESET
|
2933 DPIO_PCS_TX_LANE1_RESET
);
2934 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW1(port
),
2935 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN
|
2936 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN
|
2937 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT
) |
2938 DPIO_PCS_CLK_SOFT_RESET
);
2940 /* Fix up inter-pair skew failure */
2941 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW12(port
), 0x00750f00);
2942 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW11(port
), 0x00001500);
2943 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW14(port
), 0x40400000);
2944 mutex_unlock(&dev_priv
->sb_lock
);
2947 static void chv_pre_enable_dp(struct intel_encoder
*encoder
)
2949 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2950 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2951 struct drm_device
*dev
= encoder
->base
.dev
;
2952 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2953 struct intel_crtc
*intel_crtc
=
2954 to_intel_crtc(encoder
->base
.crtc
);
2955 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2956 int pipe
= intel_crtc
->pipe
;
2957 int data
, i
, stagger
;
2960 mutex_lock(&dev_priv
->sb_lock
);
2962 /* allow hardware to manage TX FIFO reset source */
2963 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
2964 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
2965 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
2967 if (intel_crtc
->config
->lane_count
> 2) {
2968 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
2969 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
2970 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
2973 /* Program Tx lane latency optimal setting*/
2974 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
2975 /* Set the upar bit */
2976 if (intel_crtc
->config
->lane_count
== 1)
2979 data
= (i
== 1) ? 0x0 : 0x1;
2980 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW14(ch
, i
),
2981 data
<< DPIO_UPAR_SHIFT
);
2984 /* Data lane stagger programming */
2985 if (intel_crtc
->config
->port_clock
> 270000)
2987 else if (intel_crtc
->config
->port_clock
> 135000)
2989 else if (intel_crtc
->config
->port_clock
> 67500)
2991 else if (intel_crtc
->config
->port_clock
> 33750)
2996 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
2997 val
|= DPIO_TX2_STAGGER_MASK(0x1f);
2998 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
3000 if (intel_crtc
->config
->lane_count
> 2) {
3001 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
3002 val
|= DPIO_TX2_STAGGER_MASK(0x1f);
3003 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
3006 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW12(ch
),
3007 DPIO_LANESTAGGER_STRAP(stagger
) |
3008 DPIO_LANESTAGGER_STRAP_OVRD
|
3009 DPIO_TX1_STAGGER_MASK(0x1f) |
3010 DPIO_TX1_STAGGER_MULT(6) |
3011 DPIO_TX2_STAGGER_MULT(0));
3013 if (intel_crtc
->config
->lane_count
> 2) {
3014 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW12(ch
),
3015 DPIO_LANESTAGGER_STRAP(stagger
) |
3016 DPIO_LANESTAGGER_STRAP_OVRD
|
3017 DPIO_TX1_STAGGER_MASK(0x1f) |
3018 DPIO_TX1_STAGGER_MULT(7) |
3019 DPIO_TX2_STAGGER_MULT(5));
3022 /* Deassert data lane reset */
3023 chv_data_lane_soft_reset(encoder
, false);
3025 mutex_unlock(&dev_priv
->sb_lock
);
3027 intel_enable_dp(encoder
);
3029 /* Second common lane will stay alive on its own now */
3030 if (dport
->release_cl2_override
) {
3031 chv_phy_powergate_ch(dev_priv
, DPIO_PHY0
, DPIO_CH1
, false);
3032 dport
->release_cl2_override
= false;
3036 static void chv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
3038 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
3039 struct drm_device
*dev
= encoder
->base
.dev
;
3040 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3041 struct intel_crtc
*intel_crtc
=
3042 to_intel_crtc(encoder
->base
.crtc
);
3043 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3044 enum pipe pipe
= intel_crtc
->pipe
;
3045 unsigned int lane_mask
=
3046 intel_dp_unused_lane_mask(intel_crtc
->config
->lane_count
);
3049 intel_dp_prepare(encoder
);
3052 * Must trick the second common lane into life.
3053 * Otherwise we can't even access the PLL.
3055 if (ch
== DPIO_CH0
&& pipe
== PIPE_B
)
3056 dport
->release_cl2_override
=
3057 !chv_phy_powergate_ch(dev_priv
, DPIO_PHY0
, DPIO_CH1
, true);
3059 chv_phy_powergate_lanes(encoder
, true, lane_mask
);
3061 mutex_lock(&dev_priv
->sb_lock
);
3063 /* Assert data lane reset */
3064 chv_data_lane_soft_reset(encoder
, true);
3066 /* program left/right clock distribution */
3067 if (pipe
!= PIPE_B
) {
3068 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
);
3069 val
&= ~(CHV_BUFLEFTENA1_MASK
| CHV_BUFRIGHTENA1_MASK
);
3071 val
|= CHV_BUFLEFTENA1_FORCE
;
3073 val
|= CHV_BUFRIGHTENA1_FORCE
;
3074 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
, val
);
3076 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
);
3077 val
&= ~(CHV_BUFLEFTENA2_MASK
| CHV_BUFRIGHTENA2_MASK
);
3079 val
|= CHV_BUFLEFTENA2_FORCE
;
3081 val
|= CHV_BUFRIGHTENA2_FORCE
;
3082 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
, val
);
3085 /* program clock channel usage */
3086 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(ch
));
3087 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
3089 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
3091 val
|= CHV_PCS_USEDCLKCHANNEL
;
3092 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW8(ch
), val
);
3094 if (intel_crtc
->config
->lane_count
> 2) {
3095 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW8(ch
));
3096 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
3098 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
3100 val
|= CHV_PCS_USEDCLKCHANNEL
;
3101 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW8(ch
), val
);
3105 * This a a bit weird since generally CL
3106 * matches the pipe, but here we need to
3107 * pick the CL based on the port.
3109 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW19(ch
));
3111 val
&= ~CHV_CMN_USEDCLKCHANNEL
;
3113 val
|= CHV_CMN_USEDCLKCHANNEL
;
3114 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW19(ch
), val
);
3116 mutex_unlock(&dev_priv
->sb_lock
);
3119 static void chv_dp_post_pll_disable(struct intel_encoder
*encoder
)
3121 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3122 enum pipe pipe
= to_intel_crtc(encoder
->base
.crtc
)->pipe
;
3125 mutex_lock(&dev_priv
->sb_lock
);
3127 /* disable left/right clock distribution */
3128 if (pipe
!= PIPE_B
) {
3129 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
);
3130 val
&= ~(CHV_BUFLEFTENA1_MASK
| CHV_BUFRIGHTENA1_MASK
);
3131 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
, val
);
3133 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
);
3134 val
&= ~(CHV_BUFLEFTENA2_MASK
| CHV_BUFRIGHTENA2_MASK
);
3135 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
, val
);
3138 mutex_unlock(&dev_priv
->sb_lock
);
3141 * Leave the power down bit cleared for at least one
3142 * lane so that chv_powergate_phy_ch() will power
3143 * on something when the channel is otherwise unused.
3144 * When the port is off and the override is removed
3145 * the lanes power down anyway, so otherwise it doesn't
3146 * really matter what the state of power down bits is
3149 chv_phy_powergate_lanes(encoder
, false, 0x0);
3153 * Native read with retry for link status and receiver capability reads for
3154 * cases where the sink may still be asleep.
3156 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3157 * supposed to retry 3 times per the spec.
3160 intel_dp_dpcd_read_wake(struct drm_dp_aux
*aux
, unsigned int offset
,
3161 void *buffer
, size_t size
)
3167 * Sometime we just get the same incorrect byte repeated
3168 * over the entire buffer. Doing just one throw away read
3169 * initially seems to "solve" it.
3171 drm_dp_dpcd_read(aux
, DP_DPCD_REV
, buffer
, 1);
3173 for (i
= 0; i
< 3; i
++) {
3174 ret
= drm_dp_dpcd_read(aux
, offset
, buffer
, size
);
3184 * Fetch AUX CH registers 0x202 - 0x207 which contain
3185 * link status information
3188 intel_dp_get_link_status(struct intel_dp
*intel_dp
, uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3190 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3193 DP_LINK_STATUS_SIZE
) == DP_LINK_STATUS_SIZE
;
3196 /* These are source-specific values. */
3198 intel_dp_voltage_max(struct intel_dp
*intel_dp
)
3200 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3201 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3202 enum port port
= dp_to_dig_port(intel_dp
)->port
;
3204 if (IS_BROXTON(dev
))
3205 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3206 else if (INTEL_INFO(dev
)->gen
>= 9) {
3207 if (dev_priv
->edp_low_vswing
&& port
== PORT_A
)
3208 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3209 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3210 } else if (IS_VALLEYVIEW(dev
))
3211 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3212 else if (IS_GEN7(dev
) && port
== PORT_A
)
3213 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3214 else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
)
3215 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3217 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3221 intel_dp_pre_emphasis_max(struct intel_dp
*intel_dp
, uint8_t voltage_swing
)
3223 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3224 enum port port
= dp_to_dig_port(intel_dp
)->port
;
3226 if (INTEL_INFO(dev
)->gen
>= 9) {
3227 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3228 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3229 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3231 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3232 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3233 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3235 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3237 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3239 } else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
)) {
3240 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3242 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3244 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3246 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3247 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3249 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3251 } else if (IS_VALLEYVIEW(dev
)) {
3252 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3254 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3256 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3258 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3261 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3263 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3264 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3266 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3267 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3271 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3274 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3278 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3280 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3288 static uint32_t vlv_signal_levels(struct intel_dp
*intel_dp
)
3290 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3291 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3292 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3293 struct intel_crtc
*intel_crtc
=
3294 to_intel_crtc(dport
->base
.base
.crtc
);
3295 unsigned long demph_reg_value
, preemph_reg_value
,
3296 uniqtranscale_reg_value
;
3297 uint8_t train_set
= intel_dp
->train_set
[0];
3298 enum dpio_channel port
= vlv_dport_to_channel(dport
);
3299 int pipe
= intel_crtc
->pipe
;
3301 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3302 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3303 preemph_reg_value
= 0x0004000;
3304 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3306 demph_reg_value
= 0x2B405555;
3307 uniqtranscale_reg_value
= 0x552AB83A;
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3310 demph_reg_value
= 0x2B404040;
3311 uniqtranscale_reg_value
= 0x5548B83A;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3314 demph_reg_value
= 0x2B245555;
3315 uniqtranscale_reg_value
= 0x5560B83A;
3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3318 demph_reg_value
= 0x2B405555;
3319 uniqtranscale_reg_value
= 0x5598DA3A;
3325 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3326 preemph_reg_value
= 0x0002000;
3327 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3329 demph_reg_value
= 0x2B404040;
3330 uniqtranscale_reg_value
= 0x5552B83A;
3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3333 demph_reg_value
= 0x2B404848;
3334 uniqtranscale_reg_value
= 0x5580B83A;
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3337 demph_reg_value
= 0x2B404040;
3338 uniqtranscale_reg_value
= 0x55ADDA3A;
3344 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3345 preemph_reg_value
= 0x0000000;
3346 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3348 demph_reg_value
= 0x2B305555;
3349 uniqtranscale_reg_value
= 0x5570B83A;
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3352 demph_reg_value
= 0x2B2B4040;
3353 uniqtranscale_reg_value
= 0x55ADDA3A;
3359 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3360 preemph_reg_value
= 0x0006000;
3361 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3363 demph_reg_value
= 0x1B405555;
3364 uniqtranscale_reg_value
= 0x55ADDA3A;
3374 mutex_lock(&dev_priv
->sb_lock
);
3375 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x00000000);
3376 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW4(port
), demph_reg_value
);
3377 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW2(port
),
3378 uniqtranscale_reg_value
);
3379 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW3(port
), 0x0C782040);
3380 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW11(port
), 0x00030000);
3381 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW9(port
), preemph_reg_value
);
3382 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x80000000);
3383 mutex_unlock(&dev_priv
->sb_lock
);
3388 static bool chv_need_uniq_trans_scale(uint8_t train_set
)
3390 return (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) == DP_TRAIN_PRE_EMPH_LEVEL_0
&&
3391 (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3394 static uint32_t chv_signal_levels(struct intel_dp
*intel_dp
)
3396 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3397 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3398 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3399 struct intel_crtc
*intel_crtc
= to_intel_crtc(dport
->base
.base
.crtc
);
3400 u32 deemph_reg_value
, margin_reg_value
, val
;
3401 uint8_t train_set
= intel_dp
->train_set
[0];
3402 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3403 enum pipe pipe
= intel_crtc
->pipe
;
3406 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3407 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3408 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3409 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3410 deemph_reg_value
= 128;
3411 margin_reg_value
= 52;
3413 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3414 deemph_reg_value
= 128;
3415 margin_reg_value
= 77;
3417 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3418 deemph_reg_value
= 128;
3419 margin_reg_value
= 102;
3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3422 deemph_reg_value
= 128;
3423 margin_reg_value
= 154;
3424 /* FIXME extra to set for 1200 */
3430 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3431 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3433 deemph_reg_value
= 85;
3434 margin_reg_value
= 78;
3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3437 deemph_reg_value
= 85;
3438 margin_reg_value
= 116;
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3441 deemph_reg_value
= 85;
3442 margin_reg_value
= 154;
3448 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3449 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3451 deemph_reg_value
= 64;
3452 margin_reg_value
= 104;
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3455 deemph_reg_value
= 64;
3456 margin_reg_value
= 154;
3462 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3463 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3464 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3465 deemph_reg_value
= 43;
3466 margin_reg_value
= 154;
3476 mutex_lock(&dev_priv
->sb_lock
);
3478 /* Clear calc init */
3479 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3480 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3481 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3482 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3483 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3485 if (intel_crtc
->config
->lane_count
> 2) {
3486 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3487 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3488 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3489 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3490 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3493 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW9(ch
));
3494 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3495 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3496 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW9(ch
), val
);
3498 if (intel_crtc
->config
->lane_count
> 2) {
3499 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW9(ch
));
3500 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3501 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3502 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW9(ch
), val
);
3505 /* Program swing deemph */
3506 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3507 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
));
3508 val
&= ~DPIO_SWING_DEEMPH9P5_MASK
;
3509 val
|= deemph_reg_value
<< DPIO_SWING_DEEMPH9P5_SHIFT
;
3510 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
), val
);
3513 /* Program swing margin */
3514 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3515 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3517 val
&= ~DPIO_SWING_MARGIN000_MASK
;
3518 val
|= margin_reg_value
<< DPIO_SWING_MARGIN000_SHIFT
;
3521 * Supposedly this value shouldn't matter when unique transition
3522 * scale is disabled, but in fact it does matter. Let's just
3523 * always program the same value and hope it's OK.
3525 val
&= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3526 val
|= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT
;
3528 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3532 * The document said it needs to set bit 27 for ch0 and bit 26
3533 * for ch1. Might be a typo in the doc.
3534 * For now, for this unique transition scale selection, set bit
3535 * 27 for ch0 and ch1.
3537 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3538 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3539 if (chv_need_uniq_trans_scale(train_set
))
3540 val
|= DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3542 val
&= ~DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3543 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3546 /* Start swing calculation */
3547 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3548 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3549 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3551 if (intel_crtc
->config
->lane_count
> 2) {
3552 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3553 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3554 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3557 mutex_unlock(&dev_priv
->sb_lock
);
3563 gen4_signal_levels(uint8_t train_set
)
3565 uint32_t signal_levels
= 0;
3567 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3570 signal_levels
|= DP_VOLTAGE_0_4
;
3572 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3573 signal_levels
|= DP_VOLTAGE_0_6
;
3575 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3576 signal_levels
|= DP_VOLTAGE_0_8
;
3578 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3579 signal_levels
|= DP_VOLTAGE_1_2
;
3582 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3583 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3585 signal_levels
|= DP_PRE_EMPHASIS_0
;
3587 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3588 signal_levels
|= DP_PRE_EMPHASIS_3_5
;
3590 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3591 signal_levels
|= DP_PRE_EMPHASIS_6
;
3593 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3594 signal_levels
|= DP_PRE_EMPHASIS_9_5
;
3597 return signal_levels
;
3600 /* Gen6's DP voltage swing and pre-emphasis control */
3602 gen6_edp_signal_levels(uint8_t train_set
)
3604 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3605 DP_TRAIN_PRE_EMPHASIS_MASK
);
3606 switch (signal_levels
) {
3607 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3608 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3609 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3610 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3611 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3613 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3614 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3616 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3617 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3618 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3619 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3620 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
3622 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3623 "0x%x\n", signal_levels
);
3624 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3628 /* Gen7's DP voltage swing and pre-emphasis control */
3630 gen7_edp_signal_levels(uint8_t train_set
)
3632 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3633 DP_TRAIN_PRE_EMPHASIS_MASK
);
3634 switch (signal_levels
) {
3635 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3636 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3638 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3639 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3640 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3643 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3645 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3648 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3650 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
3653 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3654 "0x%x\n", signal_levels
);
3655 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3660 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
)
3662 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3663 enum port port
= intel_dig_port
->port
;
3664 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3665 struct drm_i915_private
*dev_priv
= to_i915(dev
);
3666 uint32_t signal_levels
, mask
= 0;
3667 uint8_t train_set
= intel_dp
->train_set
[0];
3670 signal_levels
= ddi_signal_levels(intel_dp
);
3672 if (IS_BROXTON(dev
))
3675 mask
= DDI_BUF_EMP_MASK
;
3676 } else if (IS_CHERRYVIEW(dev
)) {
3677 signal_levels
= chv_signal_levels(intel_dp
);
3678 } else if (IS_VALLEYVIEW(dev
)) {
3679 signal_levels
= vlv_signal_levels(intel_dp
);
3680 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3681 signal_levels
= gen7_edp_signal_levels(train_set
);
3682 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3683 } else if (IS_GEN6(dev
) && port
== PORT_A
) {
3684 signal_levels
= gen6_edp_signal_levels(train_set
);
3685 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3687 signal_levels
= gen4_signal_levels(train_set
);
3688 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3692 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3694 DRM_DEBUG_KMS("Using vswing level %d\n",
3695 train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
);
3696 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3697 (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) >>
3698 DP_TRAIN_PRE_EMPHASIS_SHIFT
);
3700 intel_dp
->DP
= (intel_dp
->DP
& ~mask
) | signal_levels
;
3702 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3703 POSTING_READ(intel_dp
->output_reg
);
3707 intel_dp_program_link_training_pattern(struct intel_dp
*intel_dp
,
3708 uint8_t dp_train_pat
)
3710 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3711 struct drm_i915_private
*dev_priv
=
3712 to_i915(intel_dig_port
->base
.base
.dev
);
3714 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
, dp_train_pat
);
3716 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3717 POSTING_READ(intel_dp
->output_reg
);
3720 void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3722 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3723 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3724 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3725 enum port port
= intel_dig_port
->port
;
3731 val
= I915_READ(DP_TP_CTL(port
));
3732 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3733 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3734 I915_WRITE(DP_TP_CTL(port
), val
);
3737 * On PORT_A we can have only eDP in SST mode. There the only reason
3738 * we need to set idle transmission mode is to work around a HW issue
3739 * where we enable the pipe while not in idle link-training mode.
3740 * In this case there is requirement to wait for a minimum number of
3741 * idle patterns to be sent.
3746 if (wait_for((I915_READ(DP_TP_STATUS(port
)) & DP_TP_STATUS_IDLE_DONE
),
3748 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3752 intel_dp_link_down(struct intel_dp
*intel_dp
)
3754 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3755 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
3756 enum port port
= intel_dig_port
->port
;
3757 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3758 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3759 uint32_t DP
= intel_dp
->DP
;
3761 if (WARN_ON(HAS_DDI(dev
)))
3764 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3767 DRM_DEBUG_KMS("\n");
3769 if ((IS_GEN7(dev
) && port
== PORT_A
) ||
3770 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
3771 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3772 DP
|= DP_LINK_TRAIN_PAT_IDLE_CPT
;
3774 if (IS_CHERRYVIEW(dev
))
3775 DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
3777 DP
&= ~DP_LINK_TRAIN_MASK
;
3778 DP
|= DP_LINK_TRAIN_PAT_IDLE
;
3780 I915_WRITE(intel_dp
->output_reg
, DP
);
3781 POSTING_READ(intel_dp
->output_reg
);
3783 DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
3784 I915_WRITE(intel_dp
->output_reg
, DP
);
3785 POSTING_READ(intel_dp
->output_reg
);
3788 * HW workaround for IBX, we need to move the port
3789 * to transcoder A after disabling it to allow the
3790 * matching HDMI port to be enabled on transcoder A.
3792 if (HAS_PCH_IBX(dev
) && crtc
->pipe
== PIPE_B
&& port
!= PORT_A
) {
3794 * We get CPU/PCH FIFO underruns on the other pipe when
3795 * doing the workaround. Sweep them under the rug.
3797 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3798 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3800 /* always enable with pattern 1 (as per spec) */
3801 DP
&= ~(DP_PIPEB_SELECT
| DP_LINK_TRAIN_MASK
);
3802 DP
|= DP_PORT_EN
| DP_LINK_TRAIN_PAT_1
;
3803 I915_WRITE(intel_dp
->output_reg
, DP
);
3804 POSTING_READ(intel_dp
->output_reg
);
3807 I915_WRITE(intel_dp
->output_reg
, DP
);
3808 POSTING_READ(intel_dp
->output_reg
);
3810 intel_wait_for_vblank_if_active(dev_priv
->dev
, PIPE_A
);
3811 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
3812 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
3815 msleep(intel_dp
->panel_power_down_delay
);
3821 intel_dp_get_dpcd(struct intel_dp
*intel_dp
)
3823 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3824 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3825 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3828 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, 0x000, intel_dp
->dpcd
,
3829 sizeof(intel_dp
->dpcd
)) < 0)
3830 return false; /* aux transfer failed */
3832 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp
->dpcd
), intel_dp
->dpcd
);
3834 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0)
3835 return false; /* DPCD not present */
3837 /* Check if the panel supports PSR */
3838 memset(intel_dp
->psr_dpcd
, 0, sizeof(intel_dp
->psr_dpcd
));
3839 if (is_edp(intel_dp
)) {
3840 intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_PSR_SUPPORT
,
3842 sizeof(intel_dp
->psr_dpcd
));
3843 if (intel_dp
->psr_dpcd
[0] & DP_PSR_IS_SUPPORTED
) {
3844 dev_priv
->psr
.sink_support
= true;
3845 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3848 if (INTEL_INFO(dev
)->gen
>= 9 &&
3849 (intel_dp
->psr_dpcd
[0] & DP_PSR2_IS_SUPPORTED
)) {
3850 uint8_t frame_sync_cap
;
3852 dev_priv
->psr
.sink_support
= true;
3853 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3854 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP
,
3855 &frame_sync_cap
, 1);
3856 dev_priv
->psr
.aux_frame_sync
= frame_sync_cap
? true : false;
3857 /* PSR2 needs frame sync as well */
3858 dev_priv
->psr
.psr2_support
= dev_priv
->psr
.aux_frame_sync
;
3859 DRM_DEBUG_KMS("PSR2 %s on sink",
3860 dev_priv
->psr
.psr2_support
? "supported" : "not supported");
3864 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3865 yesno(intel_dp_source_supports_hbr2(intel_dp
)),
3866 yesno(drm_dp_tps3_supported(intel_dp
->dpcd
)));
3868 /* Intermediate frequency support */
3869 if (is_edp(intel_dp
) &&
3870 (intel_dp
->dpcd
[DP_EDP_CONFIGURATION_CAP
] & DP_DPCD_DISPLAY_CONTROL_CAPABLE
) &&
3871 (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_EDP_DPCD_REV
, &rev
, 1) == 1) &&
3872 (rev
>= 0x03)) { /* eDp v1.4 or higher */
3873 __le16 sink_rates
[DP_MAX_SUPPORTED_RATES
];
3876 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3877 DP_SUPPORTED_LINK_RATES
,
3879 sizeof(sink_rates
));
3881 for (i
= 0; i
< ARRAY_SIZE(sink_rates
); i
++) {
3882 int val
= le16_to_cpu(sink_rates
[i
]);
3887 /* Value read is in kHz while drm clock is saved in deca-kHz */
3888 intel_dp
->sink_rates
[i
] = (val
* 200) / 10;
3890 intel_dp
->num_sink_rates
= i
;
3893 intel_dp_print_rates(intel_dp
);
3895 if (!(intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
3896 DP_DWN_STRM_PORT_PRESENT
))
3897 return true; /* native DP sink */
3899 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0x10)
3900 return true; /* no per-port downstream info */
3902 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_DOWNSTREAM_PORT_0
,
3903 intel_dp
->downstream_ports
,
3904 DP_MAX_DOWNSTREAM_PORTS
) < 0)
3905 return false; /* downstream port status fetch failed */
3911 intel_dp_probe_oui(struct intel_dp
*intel_dp
)
3915 if (!(intel_dp
->dpcd
[DP_DOWN_STREAM_PORT_COUNT
] & DP_OUI_SUPPORT
))
3918 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_OUI
, buf
, 3) == 3)
3919 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3920 buf
[0], buf
[1], buf
[2]);
3922 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_BRANCH_OUI
, buf
, 3) == 3)
3923 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3924 buf
[0], buf
[1], buf
[2]);
3928 intel_dp_probe_mst(struct intel_dp
*intel_dp
)
3932 if (!intel_dp
->can_mst
)
3935 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x12)
3938 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_MSTM_CAP
, buf
, 1)) {
3939 if (buf
[0] & DP_MST_CAP
) {
3940 DRM_DEBUG_KMS("Sink is MST capable\n");
3941 intel_dp
->is_mst
= true;
3943 DRM_DEBUG_KMS("Sink is not MST capable\n");
3944 intel_dp
->is_mst
= false;
3948 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
3949 return intel_dp
->is_mst
;
3952 static int intel_dp_sink_crc_stop(struct intel_dp
*intel_dp
)
3954 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3955 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
3959 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0) {
3960 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3965 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
3966 buf
& ~DP_TEST_SINK_START
) < 0) {
3967 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3972 intel_dp
->sink_crc
.started
= false;
3974 hsw_enable_ips(intel_crtc
);
3978 static int intel_dp_sink_crc_start(struct intel_dp
*intel_dp
)
3980 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3981 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
3985 if (intel_dp
->sink_crc
.started
) {
3986 ret
= intel_dp_sink_crc_stop(intel_dp
);
3991 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
3994 if (!(buf
& DP_TEST_CRC_SUPPORTED
))
3997 intel_dp
->sink_crc
.last_count
= buf
& DP_TEST_COUNT_MASK
;
3999 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
4002 hsw_disable_ips(intel_crtc
);
4004 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4005 buf
| DP_TEST_SINK_START
) < 0) {
4006 hsw_enable_ips(intel_crtc
);
4010 intel_dp
->sink_crc
.started
= true;
4014 int intel_dp_sink_crc(struct intel_dp
*intel_dp
, u8
*crc
)
4016 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4017 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4018 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4024 ret
= intel_dp_sink_crc_start(intel_dp
);
4029 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4031 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
4032 DP_TEST_SINK_MISC
, &buf
) < 0) {
4036 count
= buf
& DP_TEST_COUNT_MASK
;
4039 * Count might be reset during the loop. In this case
4040 * last known count needs to be reset as well.
4043 intel_dp
->sink_crc
.last_count
= 0;
4045 if (drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_CRC_R_CR
, crc
, 6) < 0) {
4050 old_equal_new
= (count
== intel_dp
->sink_crc
.last_count
&&
4051 !memcmp(intel_dp
->sink_crc
.last_crc
, crc
,
4054 } while (--attempts
&& (count
== 0 || old_equal_new
));
4056 intel_dp
->sink_crc
.last_count
= buf
& DP_TEST_COUNT_MASK
;
4057 memcpy(intel_dp
->sink_crc
.last_crc
, crc
, 6 * sizeof(u8
));
4059 if (attempts
== 0) {
4060 if (old_equal_new
) {
4061 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4063 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4070 intel_dp_sink_crc_stop(intel_dp
);
4075 intel_dp_get_sink_irq(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4077 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4078 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4079 sink_irq_vector
, 1) == 1;
4083 intel_dp_get_sink_irq_esi(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4087 ret
= intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4089 sink_irq_vector
, 14);
4096 static uint8_t intel_dp_autotest_link_training(struct intel_dp
*intel_dp
)
4098 uint8_t test_result
= DP_TEST_ACK
;
4102 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp
*intel_dp
)
4104 uint8_t test_result
= DP_TEST_NAK
;
4108 static uint8_t intel_dp_autotest_edid(struct intel_dp
*intel_dp
)
4110 uint8_t test_result
= DP_TEST_NAK
;
4111 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4112 struct drm_connector
*connector
= &intel_connector
->base
;
4114 if (intel_connector
->detect_edid
== NULL
||
4115 connector
->edid_corrupt
||
4116 intel_dp
->aux
.i2c_defer_count
> 6) {
4117 /* Check EDID read for NACKs, DEFERs and corruption
4118 * (DP CTS 1.2 Core r1.1)
4119 * 4.2.2.4 : Failed EDID read, I2C_NAK
4120 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4121 * 4.2.2.6 : EDID corruption detected
4122 * Use failsafe mode for all cases
4124 if (intel_dp
->aux
.i2c_nack_count
> 0 ||
4125 intel_dp
->aux
.i2c_defer_count
> 0)
4126 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4127 intel_dp
->aux
.i2c_nack_count
,
4128 intel_dp
->aux
.i2c_defer_count
);
4129 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_FAILSAFE
;
4131 struct edid
*block
= intel_connector
->detect_edid
;
4133 /* We have to write the checksum
4134 * of the last block read
4136 block
+= intel_connector
->detect_edid
->extensions
;
4138 if (!drm_dp_dpcd_write(&intel_dp
->aux
,
4139 DP_TEST_EDID_CHECKSUM
,
4142 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4144 test_result
= DP_TEST_ACK
| DP_TEST_EDID_CHECKSUM_WRITE
;
4145 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_STANDARD
;
4148 /* Set test active flag here so userspace doesn't interrupt things */
4149 intel_dp
->compliance_test_active
= 1;
4154 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp
*intel_dp
)
4156 uint8_t test_result
= DP_TEST_NAK
;
4160 static void intel_dp_handle_test_request(struct intel_dp
*intel_dp
)
4162 uint8_t response
= DP_TEST_NAK
;
4166 status
= drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_REQUEST
, &rxdata
, 1);
4168 DRM_DEBUG_KMS("Could not read test request from sink\n");
4173 case DP_TEST_LINK_TRAINING
:
4174 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4175 intel_dp
->compliance_test_type
= DP_TEST_LINK_TRAINING
;
4176 response
= intel_dp_autotest_link_training(intel_dp
);
4178 case DP_TEST_LINK_VIDEO_PATTERN
:
4179 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4180 intel_dp
->compliance_test_type
= DP_TEST_LINK_VIDEO_PATTERN
;
4181 response
= intel_dp_autotest_video_pattern(intel_dp
);
4183 case DP_TEST_LINK_EDID_READ
:
4184 DRM_DEBUG_KMS("EDID test requested\n");
4185 intel_dp
->compliance_test_type
= DP_TEST_LINK_EDID_READ
;
4186 response
= intel_dp_autotest_edid(intel_dp
);
4188 case DP_TEST_LINK_PHY_TEST_PATTERN
:
4189 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4190 intel_dp
->compliance_test_type
= DP_TEST_LINK_PHY_TEST_PATTERN
;
4191 response
= intel_dp_autotest_phy_pattern(intel_dp
);
4194 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata
);
4199 status
= drm_dp_dpcd_write(&intel_dp
->aux
,
4203 DRM_DEBUG_KMS("Could not write test response to sink\n");
4207 intel_dp_check_mst_status(struct intel_dp
*intel_dp
)
4211 if (intel_dp
->is_mst
) {
4216 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4220 /* check link status - esi[10] = 0x200c */
4221 if (intel_dp
->active_mst_links
&&
4222 !drm_dp_channel_eq_ok(&esi
[10], intel_dp
->lane_count
)) {
4223 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4224 intel_dp_start_link_train(intel_dp
);
4225 intel_dp_stop_link_train(intel_dp
);
4228 DRM_DEBUG_KMS("got esi %3ph\n", esi
);
4229 ret
= drm_dp_mst_hpd_irq(&intel_dp
->mst_mgr
, esi
, &handled
);
4232 for (retry
= 0; retry
< 3; retry
++) {
4234 wret
= drm_dp_dpcd_write(&intel_dp
->aux
,
4235 DP_SINK_COUNT_ESI
+1,
4242 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4244 DRM_DEBUG_KMS("got esi2 %3ph\n", esi
);
4252 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4253 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4254 intel_dp
->is_mst
= false;
4255 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4256 /* send a hotplug event */
4257 drm_kms_helper_hotplug_event(intel_dig_port
->base
.base
.dev
);
4264 * According to DP spec
4267 * 2. Configure link according to Receiver Capabilities
4268 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4269 * 4. Check link status on receipt of hot-plug interrupt
4272 intel_dp_check_link_status(struct intel_dp
*intel_dp
)
4274 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4275 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4277 u8 link_status
[DP_LINK_STATUS_SIZE
];
4279 WARN_ON(!drm_modeset_is_locked(&dev
->mode_config
.connection_mutex
));
4282 * Clearing compliance test variables to allow capturing
4283 * of values for next automated test request.
4285 intel_dp
->compliance_test_active
= 0;
4286 intel_dp
->compliance_test_type
= 0;
4287 intel_dp
->compliance_test_data
= 0;
4289 if (!intel_encoder
->base
.crtc
)
4292 if (!to_intel_crtc(intel_encoder
->base
.crtc
)->active
)
4295 /* Try to read receiver status if the link appears to be up */
4296 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
4300 /* Now read the DPCD to see if it's actually running */
4301 if (!intel_dp_get_dpcd(intel_dp
)) {
4305 /* Try to read the source of the interrupt */
4306 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4307 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4308 /* Clear interrupt source */
4309 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4310 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4313 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4314 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4315 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4316 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4319 /* if link training is requested we should perform it always */
4320 if ((intel_dp
->compliance_test_type
== DP_TEST_LINK_TRAINING
) ||
4321 (!drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
))) {
4322 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4323 intel_encoder
->base
.name
);
4324 intel_dp_start_link_train(intel_dp
);
4325 intel_dp_stop_link_train(intel_dp
);
4329 /* XXX this is probably wrong for multiple downstream ports */
4330 static enum drm_connector_status
4331 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4333 uint8_t *dpcd
= intel_dp
->dpcd
;
4336 if (!intel_dp_get_dpcd(intel_dp
))
4337 return connector_status_disconnected
;
4339 /* if there's no downstream port, we're done */
4340 if (!(dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
))
4341 return connector_status_connected
;
4343 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4344 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4345 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4348 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_COUNT
,
4350 return connector_status_unknown
;
4352 return DP_GET_SINK_COUNT(reg
) ? connector_status_connected
4353 : connector_status_disconnected
;
4356 /* If no HPD, poke DDC gently */
4357 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4358 return connector_status_connected
;
4360 /* Well we tried, say unknown for unreliable port types */
4361 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4362 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4363 if (type
== DP_DS_PORT_TYPE_VGA
||
4364 type
== DP_DS_PORT_TYPE_NON_EDID
)
4365 return connector_status_unknown
;
4367 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4368 DP_DWN_STRM_PORT_TYPE_MASK
;
4369 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4370 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4371 return connector_status_unknown
;
4374 /* Anything else is out of spec, warn and ignore */
4375 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4376 return connector_status_disconnected
;
4379 static enum drm_connector_status
4380 edp_detect(struct intel_dp
*intel_dp
)
4382 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4383 enum drm_connector_status status
;
4385 status
= intel_panel_detect(dev
);
4386 if (status
== connector_status_unknown
)
4387 status
= connector_status_connected
;
4392 static bool ibx_digital_port_connected(struct drm_i915_private
*dev_priv
,
4393 struct intel_digital_port
*port
)
4397 switch (port
->port
) {
4401 bit
= SDE_PORTB_HOTPLUG
;
4404 bit
= SDE_PORTC_HOTPLUG
;
4407 bit
= SDE_PORTD_HOTPLUG
;
4410 MISSING_CASE(port
->port
);
4414 return I915_READ(SDEISR
) & bit
;
4417 static bool cpt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4418 struct intel_digital_port
*port
)
4422 switch (port
->port
) {
4426 bit
= SDE_PORTB_HOTPLUG_CPT
;
4429 bit
= SDE_PORTC_HOTPLUG_CPT
;
4432 bit
= SDE_PORTD_HOTPLUG_CPT
;
4435 bit
= SDE_PORTE_HOTPLUG_SPT
;
4438 MISSING_CASE(port
->port
);
4442 return I915_READ(SDEISR
) & bit
;
4445 static bool g4x_digital_port_connected(struct drm_i915_private
*dev_priv
,
4446 struct intel_digital_port
*port
)
4450 switch (port
->port
) {
4452 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4455 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4458 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4461 MISSING_CASE(port
->port
);
4465 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4468 static bool vlv_digital_port_connected(struct drm_i915_private
*dev_priv
,
4469 struct intel_digital_port
*port
)
4473 switch (port
->port
) {
4475 bit
= PORTB_HOTPLUG_LIVE_STATUS_VLV
;
4478 bit
= PORTC_HOTPLUG_LIVE_STATUS_VLV
;
4481 bit
= PORTD_HOTPLUG_LIVE_STATUS_VLV
;
4484 MISSING_CASE(port
->port
);
4488 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4491 static bool bxt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4492 struct intel_digital_port
*intel_dig_port
)
4494 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4498 intel_hpd_pin_to_port(intel_encoder
->hpd_pin
, &port
);
4501 bit
= BXT_DE_PORT_HP_DDIA
;
4504 bit
= BXT_DE_PORT_HP_DDIB
;
4507 bit
= BXT_DE_PORT_HP_DDIC
;
4514 return I915_READ(GEN8_DE_PORT_ISR
) & bit
;
4518 * intel_digital_port_connected - is the specified port connected?
4519 * @dev_priv: i915 private structure
4520 * @port: the port to test
4522 * Return %true if @port is connected, %false otherwise.
4524 bool intel_digital_port_connected(struct drm_i915_private
*dev_priv
,
4525 struct intel_digital_port
*port
)
4527 if (HAS_PCH_IBX(dev_priv
))
4528 return ibx_digital_port_connected(dev_priv
, port
);
4529 if (HAS_PCH_SPLIT(dev_priv
))
4530 return cpt_digital_port_connected(dev_priv
, port
);
4531 else if (IS_BROXTON(dev_priv
))
4532 return bxt_digital_port_connected(dev_priv
, port
);
4533 else if (IS_VALLEYVIEW(dev_priv
))
4534 return vlv_digital_port_connected(dev_priv
, port
);
4536 return g4x_digital_port_connected(dev_priv
, port
);
4539 static enum drm_connector_status
4540 ironlake_dp_detect(struct intel_dp
*intel_dp
)
4542 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4543 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4544 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4546 if (!intel_digital_port_connected(dev_priv
, intel_dig_port
))
4547 return connector_status_disconnected
;
4549 return intel_dp_detect_dpcd(intel_dp
);
4552 static enum drm_connector_status
4553 g4x_dp_detect(struct intel_dp
*intel_dp
)
4555 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4556 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4558 /* Can't disconnect eDP, but you can close the lid... */
4559 if (is_edp(intel_dp
)) {
4560 enum drm_connector_status status
;
4562 status
= intel_panel_detect(dev
);
4563 if (status
== connector_status_unknown
)
4564 status
= connector_status_connected
;
4568 if (!intel_digital_port_connected(dev
->dev_private
, intel_dig_port
))
4569 return connector_status_disconnected
;
4571 return intel_dp_detect_dpcd(intel_dp
);
4574 static struct edid
*
4575 intel_dp_get_edid(struct intel_dp
*intel_dp
)
4577 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4579 /* use cached edid if we have one */
4580 if (intel_connector
->edid
) {
4582 if (IS_ERR(intel_connector
->edid
))
4585 return drm_edid_duplicate(intel_connector
->edid
);
4587 return drm_get_edid(&intel_connector
->base
,
4588 &intel_dp
->aux
.ddc
);
4592 intel_dp_set_edid(struct intel_dp
*intel_dp
)
4594 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4597 edid
= intel_dp_get_edid(intel_dp
);
4598 intel_connector
->detect_edid
= edid
;
4600 if (intel_dp
->force_audio
!= HDMI_AUDIO_AUTO
)
4601 intel_dp
->has_audio
= intel_dp
->force_audio
== HDMI_AUDIO_ON
;
4603 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
4607 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
4609 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4611 kfree(intel_connector
->detect_edid
);
4612 intel_connector
->detect_edid
= NULL
;
4614 intel_dp
->has_audio
= false;
4617 static enum intel_display_power_domain
4618 intel_dp_power_get(struct intel_dp
*dp
)
4620 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4621 enum intel_display_power_domain power_domain
;
4623 power_domain
= intel_display_port_power_domain(encoder
);
4624 intel_display_power_get(to_i915(encoder
->base
.dev
), power_domain
);
4626 return power_domain
;
4630 intel_dp_power_put(struct intel_dp
*dp
,
4631 enum intel_display_power_domain power_domain
)
4633 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4634 intel_display_power_put(to_i915(encoder
->base
.dev
), power_domain
);
4637 static enum drm_connector_status
4638 intel_dp_detect(struct drm_connector
*connector
, bool force
)
4640 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4641 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4642 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4643 struct drm_device
*dev
= connector
->dev
;
4644 enum drm_connector_status status
;
4645 enum intel_display_power_domain power_domain
;
4649 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4650 connector
->base
.id
, connector
->name
);
4651 intel_dp_unset_edid(intel_dp
);
4653 if (intel_dp
->is_mst
) {
4654 /* MST devices are disconnected from a monitor POV */
4655 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4656 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4657 return connector_status_disconnected
;
4660 power_domain
= intel_dp_power_get(intel_dp
);
4662 /* Can't disconnect eDP, but you can close the lid... */
4663 if (is_edp(intel_dp
))
4664 status
= edp_detect(intel_dp
);
4665 else if (HAS_PCH_SPLIT(dev
))
4666 status
= ironlake_dp_detect(intel_dp
);
4668 status
= g4x_dp_detect(intel_dp
);
4669 if (status
!= connector_status_connected
) {
4670 intel_dp
->compliance_test_active
= 0;
4671 intel_dp
->compliance_test_type
= 0;
4672 intel_dp
->compliance_test_data
= 0;
4677 intel_dp_probe_oui(intel_dp
);
4679 ret
= intel_dp_probe_mst(intel_dp
);
4681 /* if we are in MST mode then this connector
4682 won't appear connected or have anything with EDID on it */
4683 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4684 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4685 status
= connector_status_disconnected
;
4690 * Clearing NACK and defer counts to get their exact values
4691 * while reading EDID which are required by Compliance tests
4692 * 4.2.2.4 and 4.2.2.5
4694 intel_dp
->aux
.i2c_nack_count
= 0;
4695 intel_dp
->aux
.i2c_defer_count
= 0;
4697 intel_dp_set_edid(intel_dp
);
4699 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4700 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4701 status
= connector_status_connected
;
4703 /* Try to read the source of the interrupt */
4704 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4705 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4706 /* Clear interrupt source */
4707 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4708 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4711 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4712 intel_dp_handle_test_request(intel_dp
);
4713 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4714 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4718 intel_dp_power_put(intel_dp
, power_domain
);
4723 intel_dp_force(struct drm_connector
*connector
)
4725 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4726 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4727 enum intel_display_power_domain power_domain
;
4729 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4730 connector
->base
.id
, connector
->name
);
4731 intel_dp_unset_edid(intel_dp
);
4733 if (connector
->status
!= connector_status_connected
)
4736 power_domain
= intel_dp_power_get(intel_dp
);
4738 intel_dp_set_edid(intel_dp
);
4740 intel_dp_power_put(intel_dp
, power_domain
);
4742 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4743 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4746 static int intel_dp_get_modes(struct drm_connector
*connector
)
4748 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4751 edid
= intel_connector
->detect_edid
;
4753 int ret
= intel_connector_update_modes(connector
, edid
);
4758 /* if eDP has no EDID, fall back to fixed mode */
4759 if (is_edp(intel_attached_dp(connector
)) &&
4760 intel_connector
->panel
.fixed_mode
) {
4761 struct drm_display_mode
*mode
;
4763 mode
= drm_mode_duplicate(connector
->dev
,
4764 intel_connector
->panel
.fixed_mode
);
4766 drm_mode_probed_add(connector
, mode
);
4775 intel_dp_detect_audio(struct drm_connector
*connector
)
4777 bool has_audio
= false;
4780 edid
= to_intel_connector(connector
)->detect_edid
;
4782 has_audio
= drm_detect_monitor_audio(edid
);
4788 intel_dp_set_property(struct drm_connector
*connector
,
4789 struct drm_property
*property
,
4792 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
4793 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4794 struct intel_encoder
*intel_encoder
= intel_attached_encoder(connector
);
4795 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4798 ret
= drm_object_property_set_value(&connector
->base
, property
, val
);
4802 if (property
== dev_priv
->force_audio_property
) {
4806 if (i
== intel_dp
->force_audio
)
4809 intel_dp
->force_audio
= i
;
4811 if (i
== HDMI_AUDIO_AUTO
)
4812 has_audio
= intel_dp_detect_audio(connector
);
4814 has_audio
= (i
== HDMI_AUDIO_ON
);
4816 if (has_audio
== intel_dp
->has_audio
)
4819 intel_dp
->has_audio
= has_audio
;
4823 if (property
== dev_priv
->broadcast_rgb_property
) {
4824 bool old_auto
= intel_dp
->color_range_auto
;
4825 bool old_range
= intel_dp
->limited_color_range
;
4828 case INTEL_BROADCAST_RGB_AUTO
:
4829 intel_dp
->color_range_auto
= true;
4831 case INTEL_BROADCAST_RGB_FULL
:
4832 intel_dp
->color_range_auto
= false;
4833 intel_dp
->limited_color_range
= false;
4835 case INTEL_BROADCAST_RGB_LIMITED
:
4836 intel_dp
->color_range_auto
= false;
4837 intel_dp
->limited_color_range
= true;
4843 if (old_auto
== intel_dp
->color_range_auto
&&
4844 old_range
== intel_dp
->limited_color_range
)
4850 if (is_edp(intel_dp
) &&
4851 property
== connector
->dev
->mode_config
.scaling_mode_property
) {
4852 if (val
== DRM_MODE_SCALE_NONE
) {
4853 DRM_DEBUG_KMS("no scaling not supported\n");
4857 if (intel_connector
->panel
.fitting_mode
== val
) {
4858 /* the eDP scaling property is not changed */
4861 intel_connector
->panel
.fitting_mode
= val
;
4869 if (intel_encoder
->base
.crtc
)
4870 intel_crtc_restore_mode(intel_encoder
->base
.crtc
);
4876 intel_dp_connector_destroy(struct drm_connector
*connector
)
4878 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4880 kfree(intel_connector
->detect_edid
);
4882 if (!IS_ERR_OR_NULL(intel_connector
->edid
))
4883 kfree(intel_connector
->edid
);
4885 /* Can't call is_edp() since the encoder may have been destroyed
4887 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4888 intel_panel_fini(&intel_connector
->panel
);
4890 drm_connector_cleanup(connector
);
4894 void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
4896 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
4897 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4899 intel_dp_aux_fini(intel_dp
);
4900 intel_dp_mst_encoder_cleanup(intel_dig_port
);
4901 if (is_edp(intel_dp
)) {
4902 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4904 * vdd might still be enabled do to the delayed vdd off.
4905 * Make sure vdd is actually turned off here.
4908 edp_panel_vdd_off_sync(intel_dp
);
4909 pps_unlock(intel_dp
);
4911 if (intel_dp
->edp_notifier
.notifier_call
) {
4912 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
4913 intel_dp
->edp_notifier
.notifier_call
= NULL
;
4916 drm_encoder_cleanup(encoder
);
4917 kfree(intel_dig_port
);
4920 static void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
4922 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4924 if (!is_edp(intel_dp
))
4928 * vdd might still be enabled do to the delayed vdd off.
4929 * Make sure vdd is actually turned off here.
4931 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4933 edp_panel_vdd_off_sync(intel_dp
);
4934 pps_unlock(intel_dp
);
4937 static void intel_edp_panel_vdd_sanitize(struct intel_dp
*intel_dp
)
4939 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4940 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4941 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4942 enum intel_display_power_domain power_domain
;
4944 lockdep_assert_held(&dev_priv
->pps_mutex
);
4946 if (!edp_have_panel_vdd(intel_dp
))
4950 * The VDD bit needs a power domain reference, so if the bit is
4951 * already enabled when we boot or resume, grab this reference and
4952 * schedule a vdd off, so we don't hold on to the reference
4955 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4956 power_domain
= intel_display_port_power_domain(&intel_dig_port
->base
);
4957 intel_display_power_get(dev_priv
, power_domain
);
4959 edp_panel_vdd_schedule_off(intel_dp
);
4962 static void intel_dp_encoder_reset(struct drm_encoder
*encoder
)
4964 struct intel_dp
*intel_dp
;
4966 if (to_intel_encoder(encoder
)->type
!= INTEL_OUTPUT_EDP
)
4969 intel_dp
= enc_to_intel_dp(encoder
);
4974 * Read out the current power sequencer assignment,
4975 * in case the BIOS did something with it.
4977 if (IS_VALLEYVIEW(encoder
->dev
))
4978 vlv_initial_power_sequencer_setup(intel_dp
);
4980 intel_edp_panel_vdd_sanitize(intel_dp
);
4982 pps_unlock(intel_dp
);
4985 static const struct drm_connector_funcs intel_dp_connector_funcs
= {
4986 .dpms
= drm_atomic_helper_connector_dpms
,
4987 .detect
= intel_dp_detect
,
4988 .force
= intel_dp_force
,
4989 .fill_modes
= drm_helper_probe_single_connector_modes
,
4990 .set_property
= intel_dp_set_property
,
4991 .atomic_get_property
= intel_connector_atomic_get_property
,
4992 .destroy
= intel_dp_connector_destroy
,
4993 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4994 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
4997 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs
= {
4998 .get_modes
= intel_dp_get_modes
,
4999 .mode_valid
= intel_dp_mode_valid
,
5000 .best_encoder
= intel_best_encoder
,
5003 static const struct drm_encoder_funcs intel_dp_enc_funcs
= {
5004 .reset
= intel_dp_encoder_reset
,
5005 .destroy
= intel_dp_encoder_destroy
,
5009 intel_dp_hpd_pulse(struct intel_digital_port
*intel_dig_port
, bool long_hpd
)
5011 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5012 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5013 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
5014 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5015 enum intel_display_power_domain power_domain
;
5016 enum irqreturn ret
= IRQ_NONE
;
5018 if (intel_dig_port
->base
.type
!= INTEL_OUTPUT_EDP
)
5019 intel_dig_port
->base
.type
= INTEL_OUTPUT_DISPLAYPORT
;
5021 if (long_hpd
&& intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
) {
5023 * vdd off can generate a long pulse on eDP which
5024 * would require vdd on to handle it, and thus we
5025 * would end up in an endless cycle of
5026 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5028 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5029 port_name(intel_dig_port
->port
));
5033 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5034 port_name(intel_dig_port
->port
),
5035 long_hpd
? "long" : "short");
5037 power_domain
= intel_display_port_power_domain(intel_encoder
);
5038 intel_display_power_get(dev_priv
, power_domain
);
5041 /* indicate that we need to restart link training */
5042 intel_dp
->train_set_valid
= false;
5044 if (!intel_digital_port_connected(dev_priv
, intel_dig_port
))
5047 if (!intel_dp_get_dpcd(intel_dp
)) {
5051 intel_dp_probe_oui(intel_dp
);
5053 if (!intel_dp_probe_mst(intel_dp
)) {
5054 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5055 intel_dp_check_link_status(intel_dp
);
5056 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5060 if (intel_dp
->is_mst
) {
5061 if (intel_dp_check_mst_status(intel_dp
) == -EINVAL
)
5065 if (!intel_dp
->is_mst
) {
5066 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5067 intel_dp_check_link_status(intel_dp
);
5068 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5076 /* if we were in MST mode, and device is not there get out of MST mode */
5077 if (intel_dp
->is_mst
) {
5078 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp
->is_mst
, intel_dp
->mst_mgr
.mst_state
);
5079 intel_dp
->is_mst
= false;
5080 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
5083 intel_display_power_put(dev_priv
, power_domain
);
5088 /* Return which DP Port should be selected for Transcoder DP control */
5090 intel_trans_dp_port_sel(struct drm_crtc
*crtc
)
5092 struct drm_device
*dev
= crtc
->dev
;
5093 struct intel_encoder
*intel_encoder
;
5094 struct intel_dp
*intel_dp
;
5096 for_each_encoder_on_crtc(dev
, crtc
, intel_encoder
) {
5097 intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
5099 if (intel_encoder
->type
== INTEL_OUTPUT_DISPLAYPORT
||
5100 intel_encoder
->type
== INTEL_OUTPUT_EDP
)
5101 return intel_dp
->output_reg
;
5107 /* check the VBT to see whether the eDP is on another port */
5108 bool intel_dp_is_edp(struct drm_device
*dev
, enum port port
)
5110 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5111 union child_device_config
*p_child
;
5113 static const short port_mapping
[] = {
5114 [PORT_B
] = DVO_PORT_DPB
,
5115 [PORT_C
] = DVO_PORT_DPC
,
5116 [PORT_D
] = DVO_PORT_DPD
,
5117 [PORT_E
] = DVO_PORT_DPE
,
5121 * eDP not supported on g4x. so bail out early just
5122 * for a bit extra safety in case the VBT is bonkers.
5124 if (INTEL_INFO(dev
)->gen
< 5)
5130 if (!dev_priv
->vbt
.child_dev_num
)
5133 for (i
= 0; i
< dev_priv
->vbt
.child_dev_num
; i
++) {
5134 p_child
= dev_priv
->vbt
.child_dev
+ i
;
5136 if (p_child
->common
.dvo_port
== port_mapping
[port
] &&
5137 (p_child
->common
.device_type
& DEVICE_TYPE_eDP_BITS
) ==
5138 (DEVICE_TYPE_eDP
& DEVICE_TYPE_eDP_BITS
))
5145 intel_dp_add_properties(struct intel_dp
*intel_dp
, struct drm_connector
*connector
)
5147 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
5149 intel_attach_force_audio_property(connector
);
5150 intel_attach_broadcast_rgb_property(connector
);
5151 intel_dp
->color_range_auto
= true;
5153 if (is_edp(intel_dp
)) {
5154 drm_mode_create_scaling_mode_property(connector
->dev
);
5155 drm_object_attach_property(
5157 connector
->dev
->mode_config
.scaling_mode_property
,
5158 DRM_MODE_SCALE_ASPECT
);
5159 intel_connector
->panel
.fitting_mode
= DRM_MODE_SCALE_ASPECT
;
5163 static void intel_dp_init_panel_power_timestamps(struct intel_dp
*intel_dp
)
5165 intel_dp
->last_power_cycle
= jiffies
;
5166 intel_dp
->last_power_on
= jiffies
;
5167 intel_dp
->last_backlight_off
= jiffies
;
5171 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
5172 struct intel_dp
*intel_dp
)
5174 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5175 struct edp_power_seq cur
, vbt
, spec
,
5176 *final
= &intel_dp
->pps_delays
;
5177 u32 pp_on
, pp_off
, pp_div
= 0, pp_ctl
= 0;
5178 int pp_ctrl_reg
, pp_on_reg
, pp_off_reg
, pp_div_reg
= 0;
5180 lockdep_assert_held(&dev_priv
->pps_mutex
);
5182 /* already initialized? */
5183 if (final
->t11_t12
!= 0)
5186 if (IS_BROXTON(dev
)) {
5188 * TODO: BXT has 2 sets of PPS registers.
5189 * Correct Register for Broxton need to be identified
5190 * using VBT. hardcoding for now
5192 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5193 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5194 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5195 } else if (HAS_PCH_SPLIT(dev
)) {
5196 pp_ctrl_reg
= PCH_PP_CONTROL
;
5197 pp_on_reg
= PCH_PP_ON_DELAYS
;
5198 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5199 pp_div_reg
= PCH_PP_DIVISOR
;
5201 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5203 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
5204 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5205 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5206 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5209 /* Workaround: Need to write PP_CONTROL with the unlock key as
5210 * the very first thing. */
5211 pp_ctl
= ironlake_get_pp_control(intel_dp
);
5213 pp_on
= I915_READ(pp_on_reg
);
5214 pp_off
= I915_READ(pp_off_reg
);
5215 if (!IS_BROXTON(dev
)) {
5216 I915_WRITE(pp_ctrl_reg
, pp_ctl
);
5217 pp_div
= I915_READ(pp_div_reg
);
5220 /* Pull timing values out of registers */
5221 cur
.t1_t3
= (pp_on
& PANEL_POWER_UP_DELAY_MASK
) >>
5222 PANEL_POWER_UP_DELAY_SHIFT
;
5224 cur
.t8
= (pp_on
& PANEL_LIGHT_ON_DELAY_MASK
) >>
5225 PANEL_LIGHT_ON_DELAY_SHIFT
;
5227 cur
.t9
= (pp_off
& PANEL_LIGHT_OFF_DELAY_MASK
) >>
5228 PANEL_LIGHT_OFF_DELAY_SHIFT
;
5230 cur
.t10
= (pp_off
& PANEL_POWER_DOWN_DELAY_MASK
) >>
5231 PANEL_POWER_DOWN_DELAY_SHIFT
;
5233 if (IS_BROXTON(dev
)) {
5234 u16 tmp
= (pp_ctl
& BXT_POWER_CYCLE_DELAY_MASK
) >>
5235 BXT_POWER_CYCLE_DELAY_SHIFT
;
5237 cur
.t11_t12
= (tmp
- 1) * 1000;
5241 cur
.t11_t12
= ((pp_div
& PANEL_POWER_CYCLE_DELAY_MASK
) >>
5242 PANEL_POWER_CYCLE_DELAY_SHIFT
) * 1000;
5245 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5246 cur
.t1_t3
, cur
.t8
, cur
.t9
, cur
.t10
, cur
.t11_t12
);
5248 vbt
= dev_priv
->vbt
.edp_pps
;
5250 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5251 * our hw here, which are all in 100usec. */
5252 spec
.t1_t3
= 210 * 10;
5253 spec
.t8
= 50 * 10; /* no limit for t8, use t7 instead */
5254 spec
.t9
= 50 * 10; /* no limit for t9, make it symmetric with t8 */
5255 spec
.t10
= 500 * 10;
5256 /* This one is special and actually in units of 100ms, but zero
5257 * based in the hw (so we need to add 100 ms). But the sw vbt
5258 * table multiplies it with 1000 to make it in units of 100usec,
5260 spec
.t11_t12
= (510 + 100) * 10;
5262 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5263 vbt
.t1_t3
, vbt
.t8
, vbt
.t9
, vbt
.t10
, vbt
.t11_t12
);
5265 /* Use the max of the register settings and vbt. If both are
5266 * unset, fall back to the spec limits. */
5267 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5269 max(cur.field, vbt.field))
5270 assign_final(t1_t3
);
5274 assign_final(t11_t12
);
5277 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5278 intel_dp
->panel_power_up_delay
= get_delay(t1_t3
);
5279 intel_dp
->backlight_on_delay
= get_delay(t8
);
5280 intel_dp
->backlight_off_delay
= get_delay(t9
);
5281 intel_dp
->panel_power_down_delay
= get_delay(t10
);
5282 intel_dp
->panel_power_cycle_delay
= get_delay(t11_t12
);
5285 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5286 intel_dp
->panel_power_up_delay
, intel_dp
->panel_power_down_delay
,
5287 intel_dp
->panel_power_cycle_delay
);
5289 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5290 intel_dp
->backlight_on_delay
, intel_dp
->backlight_off_delay
);
5294 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
5295 struct intel_dp
*intel_dp
)
5297 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5298 u32 pp_on
, pp_off
, pp_div
, port_sel
= 0;
5299 int div
= HAS_PCH_SPLIT(dev
) ? intel_pch_rawclk(dev
) : intel_hrawclk(dev
);
5300 int pp_on_reg
, pp_off_reg
, pp_div_reg
= 0, pp_ctrl_reg
;
5301 enum port port
= dp_to_dig_port(intel_dp
)->port
;
5302 const struct edp_power_seq
*seq
= &intel_dp
->pps_delays
;
5304 lockdep_assert_held(&dev_priv
->pps_mutex
);
5306 if (IS_BROXTON(dev
)) {
5308 * TODO: BXT has 2 sets of PPS registers.
5309 * Correct Register for Broxton need to be identified
5310 * using VBT. hardcoding for now
5312 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5313 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5314 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5316 } else if (HAS_PCH_SPLIT(dev
)) {
5317 pp_on_reg
= PCH_PP_ON_DELAYS
;
5318 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5319 pp_div_reg
= PCH_PP_DIVISOR
;
5321 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5323 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5324 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5325 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5329 * And finally store the new values in the power sequencer. The
5330 * backlight delays are set to 1 because we do manual waits on them. For
5331 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5332 * we'll end up waiting for the backlight off delay twice: once when we
5333 * do the manual sleep, and once when we disable the panel and wait for
5334 * the PP_STATUS bit to become zero.
5336 pp_on
= (seq
->t1_t3
<< PANEL_POWER_UP_DELAY_SHIFT
) |
5337 (1 << PANEL_LIGHT_ON_DELAY_SHIFT
);
5338 pp_off
= (1 << PANEL_LIGHT_OFF_DELAY_SHIFT
) |
5339 (seq
->t10
<< PANEL_POWER_DOWN_DELAY_SHIFT
);
5340 /* Compute the divisor for the pp clock, simply match the Bspec
5342 if (IS_BROXTON(dev
)) {
5343 pp_div
= I915_READ(pp_ctrl_reg
);
5344 pp_div
&= ~BXT_POWER_CYCLE_DELAY_MASK
;
5345 pp_div
|= (DIV_ROUND_UP((seq
->t11_t12
+ 1), 1000)
5346 << BXT_POWER_CYCLE_DELAY_SHIFT
);
5348 pp_div
= ((100 * div
)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT
;
5349 pp_div
|= (DIV_ROUND_UP(seq
->t11_t12
, 1000)
5350 << PANEL_POWER_CYCLE_DELAY_SHIFT
);
5353 /* Haswell doesn't have any port selection bits for the panel
5354 * power sequencer any more. */
5355 if (IS_VALLEYVIEW(dev
)) {
5356 port_sel
= PANEL_PORT_SELECT_VLV(port
);
5357 } else if (HAS_PCH_IBX(dev
) || HAS_PCH_CPT(dev
)) {
5359 port_sel
= PANEL_PORT_SELECT_DPA
;
5361 port_sel
= PANEL_PORT_SELECT_DPD
;
5366 I915_WRITE(pp_on_reg
, pp_on
);
5367 I915_WRITE(pp_off_reg
, pp_off
);
5368 if (IS_BROXTON(dev
))
5369 I915_WRITE(pp_ctrl_reg
, pp_div
);
5371 I915_WRITE(pp_div_reg
, pp_div
);
5373 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5374 I915_READ(pp_on_reg
),
5375 I915_READ(pp_off_reg
),
5377 (I915_READ(pp_ctrl_reg
) & BXT_POWER_CYCLE_DELAY_MASK
) :
5378 I915_READ(pp_div_reg
));
5382 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5384 * @refresh_rate: RR to be programmed
5386 * This function gets called when refresh rate (RR) has to be changed from
5387 * one frequency to another. Switches can be between high and low RR
5388 * supported by the panel or to any other RR based on media playback (in
5389 * this case, RR value needs to be passed from user space).
5391 * The caller of this function needs to take a lock on dev_priv->drrs.
5393 static void intel_dp_set_drrs_state(struct drm_device
*dev
, int refresh_rate
)
5395 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5396 struct intel_encoder
*encoder
;
5397 struct intel_digital_port
*dig_port
= NULL
;
5398 struct intel_dp
*intel_dp
= dev_priv
->drrs
.dp
;
5399 struct intel_crtc_state
*config
= NULL
;
5400 struct intel_crtc
*intel_crtc
= NULL
;
5401 enum drrs_refresh_rate_type index
= DRRS_HIGH_RR
;
5403 if (refresh_rate
<= 0) {
5404 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5408 if (intel_dp
== NULL
) {
5409 DRM_DEBUG_KMS("DRRS not supported.\n");
5414 * FIXME: This needs proper synchronization with psr state for some
5415 * platforms that cannot have PSR and DRRS enabled at the same time.
5418 dig_port
= dp_to_dig_port(intel_dp
);
5419 encoder
= &dig_port
->base
;
5420 intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
5423 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5427 config
= intel_crtc
->config
;
5429 if (dev_priv
->drrs
.type
< SEAMLESS_DRRS_SUPPORT
) {
5430 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5434 if (intel_dp
->attached_connector
->panel
.downclock_mode
->vrefresh
==
5436 index
= DRRS_LOW_RR
;
5438 if (index
== dev_priv
->drrs
.refresh_rate_type
) {
5440 "DRRS requested for previously set RR...ignoring\n");
5444 if (!intel_crtc
->active
) {
5445 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5449 if (INTEL_INFO(dev
)->gen
>= 8 && !IS_CHERRYVIEW(dev
)) {
5452 intel_dp_set_m_n(intel_crtc
, M1_N1
);
5455 intel_dp_set_m_n(intel_crtc
, M2_N2
);
5459 DRM_ERROR("Unsupported refreshrate type\n");
5461 } else if (INTEL_INFO(dev
)->gen
> 6) {
5462 u32 reg
= PIPECONF(intel_crtc
->config
->cpu_transcoder
);
5465 val
= I915_READ(reg
);
5466 if (index
> DRRS_HIGH_RR
) {
5467 if (IS_VALLEYVIEW(dev
))
5468 val
|= PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5470 val
|= PIPECONF_EDP_RR_MODE_SWITCH
;
5472 if (IS_VALLEYVIEW(dev
))
5473 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5475 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH
;
5477 I915_WRITE(reg
, val
);
5480 dev_priv
->drrs
.refresh_rate_type
= index
;
5482 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate
);
5486 * intel_edp_drrs_enable - init drrs struct if supported
5487 * @intel_dp: DP struct
5489 * Initializes frontbuffer_bits and drrs.dp
5491 void intel_edp_drrs_enable(struct intel_dp
*intel_dp
)
5493 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5494 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5495 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5496 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5497 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5499 if (!intel_crtc
->config
->has_drrs
) {
5500 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5504 mutex_lock(&dev_priv
->drrs
.mutex
);
5505 if (WARN_ON(dev_priv
->drrs
.dp
)) {
5506 DRM_ERROR("DRRS already enabled\n");
5510 dev_priv
->drrs
.busy_frontbuffer_bits
= 0;
5512 dev_priv
->drrs
.dp
= intel_dp
;
5515 mutex_unlock(&dev_priv
->drrs
.mutex
);
5519 * intel_edp_drrs_disable - Disable DRRS
5520 * @intel_dp: DP struct
5523 void intel_edp_drrs_disable(struct intel_dp
*intel_dp
)
5525 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5526 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5527 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5528 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5529 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5531 if (!intel_crtc
->config
->has_drrs
)
5534 mutex_lock(&dev_priv
->drrs
.mutex
);
5535 if (!dev_priv
->drrs
.dp
) {
5536 mutex_unlock(&dev_priv
->drrs
.mutex
);
5540 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5541 intel_dp_set_drrs_state(dev_priv
->dev
,
5542 intel_dp
->attached_connector
->panel
.
5543 fixed_mode
->vrefresh
);
5545 dev_priv
->drrs
.dp
= NULL
;
5546 mutex_unlock(&dev_priv
->drrs
.mutex
);
5548 cancel_delayed_work_sync(&dev_priv
->drrs
.work
);
5551 static void intel_edp_drrs_downclock_work(struct work_struct
*work
)
5553 struct drm_i915_private
*dev_priv
=
5554 container_of(work
, typeof(*dev_priv
), drrs
.work
.work
);
5555 struct intel_dp
*intel_dp
;
5557 mutex_lock(&dev_priv
->drrs
.mutex
);
5559 intel_dp
= dev_priv
->drrs
.dp
;
5565 * The delayed work can race with an invalidate hence we need to
5569 if (dev_priv
->drrs
.busy_frontbuffer_bits
)
5572 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
)
5573 intel_dp_set_drrs_state(dev_priv
->dev
,
5574 intel_dp
->attached_connector
->panel
.
5575 downclock_mode
->vrefresh
);
5578 mutex_unlock(&dev_priv
->drrs
.mutex
);
5582 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5584 * @frontbuffer_bits: frontbuffer plane tracking bits
5586 * This function gets called everytime rendering on the given planes start.
5587 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5589 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5591 void intel_edp_drrs_invalidate(struct drm_device
*dev
,
5592 unsigned frontbuffer_bits
)
5594 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5595 struct drm_crtc
*crtc
;
5598 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5601 cancel_delayed_work(&dev_priv
->drrs
.work
);
5603 mutex_lock(&dev_priv
->drrs
.mutex
);
5604 if (!dev_priv
->drrs
.dp
) {
5605 mutex_unlock(&dev_priv
->drrs
.mutex
);
5609 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5610 pipe
= to_intel_crtc(crtc
)->pipe
;
5612 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5613 dev_priv
->drrs
.busy_frontbuffer_bits
|= frontbuffer_bits
;
5615 /* invalidate means busy screen hence upclock */
5616 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5617 intel_dp_set_drrs_state(dev_priv
->dev
,
5618 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5619 fixed_mode
->vrefresh
);
5621 mutex_unlock(&dev_priv
->drrs
.mutex
);
5625 * intel_edp_drrs_flush - Restart Idleness DRRS
5627 * @frontbuffer_bits: frontbuffer plane tracking bits
5629 * This function gets called every time rendering on the given planes has
5630 * completed or flip on a crtc is completed. So DRRS should be upclocked
5631 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5632 * if no other planes are dirty.
5634 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5636 void intel_edp_drrs_flush(struct drm_device
*dev
,
5637 unsigned frontbuffer_bits
)
5639 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5640 struct drm_crtc
*crtc
;
5643 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5646 cancel_delayed_work(&dev_priv
->drrs
.work
);
5648 mutex_lock(&dev_priv
->drrs
.mutex
);
5649 if (!dev_priv
->drrs
.dp
) {
5650 mutex_unlock(&dev_priv
->drrs
.mutex
);
5654 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5655 pipe
= to_intel_crtc(crtc
)->pipe
;
5657 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5658 dev_priv
->drrs
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
5660 /* flush means busy screen hence upclock */
5661 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5662 intel_dp_set_drrs_state(dev_priv
->dev
,
5663 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5664 fixed_mode
->vrefresh
);
5667 * flush also means no more activity hence schedule downclock, if all
5668 * other fbs are quiescent too
5670 if (!dev_priv
->drrs
.busy_frontbuffer_bits
)
5671 schedule_delayed_work(&dev_priv
->drrs
.work
,
5672 msecs_to_jiffies(1000));
5673 mutex_unlock(&dev_priv
->drrs
.mutex
);
5677 * DOC: Display Refresh Rate Switching (DRRS)
5679 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5680 * which enables switching between low and high refresh rates,
5681 * dynamically, based on the usage scenario. This feature is applicable
5682 * for internal panels.
5684 * Indication that the panel supports DRRS is given by the panel EDID, which
5685 * would list multiple refresh rates for one resolution.
5687 * DRRS is of 2 types - static and seamless.
5688 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5689 * (may appear as a blink on screen) and is used in dock-undock scenario.
5690 * Seamless DRRS involves changing RR without any visual effect to the user
5691 * and can be used during normal system usage. This is done by programming
5692 * certain registers.
5694 * Support for static/seamless DRRS may be indicated in the VBT based on
5695 * inputs from the panel spec.
5697 * DRRS saves power by switching to low RR based on usage scenarios.
5700 * The implementation is based on frontbuffer tracking implementation.
5701 * When there is a disturbance on the screen triggered by user activity or a
5702 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5703 * When there is no movement on screen, after a timeout of 1 second, a switch
5704 * to low RR is made.
5705 * For integration with frontbuffer tracking code,
5706 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5708 * DRRS can be further extended to support other internal panels and also
5709 * the scenario of video playback wherein RR is set based on the rate
5710 * requested by userspace.
5714 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5715 * @intel_connector: eDP connector
5716 * @fixed_mode: preferred mode of panel
5718 * This function is called only once at driver load to initialize basic
5722 * Downclock mode if panel supports it, else return NULL.
5723 * DRRS support is determined by the presence of downclock mode (apart
5724 * from VBT setting).
/*
 * intel_dp_drrs_init - initialize DRRS (Dynamic Refresh Rate Switching)
 * state for an eDP panel.
 *
 * Returns the panel's downclock mode when seamless DRRS is usable;
 * the bail-out paths (too-old hardware, no VBT support, no downclock
 * mode) return NULL in the upstream source, but those return lines were
 * dropped by the extraction that produced this chunk.
 *
 * NOTE(review): this chunk is a garbled extraction -- statements are split
 * across lines, original line numbers are fused into the text, and
 * brace/return-only lines are missing.  Code below is left byte-identical;
 * only comments are added.
 */
5726 static struct drm_display_mode
*
5727 intel_dp_drrs_init(struct intel_connector
*intel_connector
,
5728 struct drm_display_mode
*fixed_mode
)
5730 struct drm_connector
*connector
= &intel_connector
->base
;
5731 struct drm_device
*dev
= connector
->dev
;
5732 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5733 struct drm_display_mode
*downclock_mode
= NULL
;
/* Work item and lock driving the deferred switch to the low refresh rate. */
5735 INIT_DELAYED_WORK(&dev_priv
->drrs
.work
, intel_edp_drrs_downclock_work
);
5736 mutex_init(&dev_priv
->drrs
.mutex
);
/* DRRS needs Gen7+ hardware; older gens bail out (early return elided). */
5738 if (INTEL_INFO(dev
)->gen
<= 6) {
5739 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
/* The VBT must explicitly advertise seamless DRRS support. */
5743 if (dev_priv
->vbt
.drrs_type
!= SEAMLESS_DRRS_SUPPORT
) {
5744 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* Look for a lower-refresh variant of the fixed panel mode. */
5748 downclock_mode
= intel_find_panel_downclock
5749 (dev
, fixed_mode
, connector
);
5751 if (!downclock_mode
) {
5752 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
/* DRRS is usable: record the VBT type and start at the high refresh rate. */
5756 dev_priv
->drrs
.type
= dev_priv
->vbt
.drrs_type
;
5758 dev_priv
->drrs
.refresh_rate_type
= DRRS_HIGH_RR
;
5759 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5760 return downclock_mode
;
/*
 * intel_edp_init_connector - probe and set up the eDP panel behind this
 * connector: read the DPCD, cache the EDID, pick the fixed panel mode
 * (EDID-preferred mode first, VBT mode as fallback), init DRRS, and wire
 * up the panel/backlight helpers.
 *
 * Returns true on success; the failure paths (non-eDP port, "ghost"
 * device whose link info cannot be read) return early, but those return
 * lines were dropped by the extraction that produced this chunk.
 *
 * NOTE(review): garbled extraction -- statements split across lines,
 * some lines missing (declarations of has_dpcd/edid, braces, returns).
 * Code is byte-identical; only comments are added.
 */
5763 static bool intel_edp_init_connector(struct intel_dp
*intel_dp
,
5764 struct intel_connector
*intel_connector
)
5766 struct drm_connector
*connector
= &intel_connector
->base
;
5767 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5768 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5769 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5770 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5771 struct drm_display_mode
*fixed_mode
= NULL
;
5772 struct drm_display_mode
*downclock_mode
= NULL
;
5774 struct drm_display_mode
*scan
;
5776 enum pipe pipe
= INVALID_PIPE
;
/* Nothing to do for external (non-eDP) DP ports. */
5778 if (!is_edp(intel_dp
))
/* Sanitize any VDD state left over from the BIOS (under pps lock). */
5782 intel_edp_panel_vdd_sanitize(intel_dp
)
;
5783 pps_unlock(intel_dp
);
5785 /* Cache DPCD and EDID for edp. */
5786 has_dpcd
= intel_dp_get_dpcd(intel_dp
);
/* DPCD rev >= 1.1 reports whether AUX handshake can be skipped. */
5789 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11)
5790 dev_priv
->no_aux_handshake
=
5791 intel_dp
->dpcd
[DP_MAX_DOWNSPREAD
] &
5792 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
;
5794 /* if this fails, presume the device is a ghost */
5795 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5799 /* We now know it's not a ghost, init power sequence regs. */
5801 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
5802 pps_unlock(intel_dp
);
/* Read the EDID over the AUX DDC channel and cache it on the connector. */
5804 mutex_lock(&dev
->mode_config
.mutex
);
5805 edid
= drm_get_edid(connector
, &intel_dp
->aux
.ddc
);
5807 if (drm_add_edid_modes(connector
, edid
)) {
5808 drm_mode_connector_update_edid_property(connector
,
5810 drm_edid_to_eld(connector
, edid
);
/* Cache an error pointer so later probes know EDID read failed. */
5813 edid
= ERR_PTR(-EINVAL
);
5816 edid
= ERR_PTR(-ENOENT
);
5818 intel_connector
->edid
= edid
;
5820 /* prefer fixed mode from EDID if available */
5821 list_for_each_entry(scan
, &connector
->probed_modes
, head
) {
5822 if ((scan
->type
& DRM_MODE_TYPE_PREFERRED
)) {
5823 fixed_mode
= drm_mode_duplicate(dev
, scan
);
5824 downclock_mode
= intel_dp_drrs_init(
5825 intel_connector
, fixed_mode
);
5830 /* fallback to VBT if available for eDP */
5831 if (!fixed_mode
&& dev_priv
->vbt
.lfp_lvds_vbt_mode
) {
5832 fixed_mode
= drm_mode_duplicate(dev
,
5833 dev_priv
->vbt
.lfp_lvds_vbt_mode
);
5835 fixed_mode
->type
|= DRM_MODE_TYPE_PREFERRED
;
5837 mutex_unlock(&dev
->mode_config
.mutex
);
/* VLV: panel power is lost on reboot; hook a reboot notifier. */
5839 if (IS_VALLEYVIEW(dev
)) {
5840 intel_dp
->edp_notifier
.notifier_call
= edp_notify_handler
;
5841 register_reboot_notifier(&intel_dp
->edp_notifier
);
5844 * Figure out the current pipe for the initial backlight setup.
5845 * If the current pipe isn't valid, try the PPS pipe, and if that
5846 * fails just assume pipe A.
5848 if (IS_CHERRYVIEW(dev
))
5849 pipe
= DP_PORT_TO_PIPE_CHV(intel_dp
->DP
);
5851 pipe
= PORT_TO_PIPE(intel_dp
->DP
);
5853 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5854 pipe
= intel_dp
->pps_pipe
;
5856 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5859 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
/* Register the panel (fixed + optional downclock mode) and backlight. */
5863 intel_panel_init(&intel_connector
->panel
, fixed_mode
, downclock_mode
);
5864 intel_connector
->panel
.backlight
.power
= intel_edp_backlight_power
;
5865 intel_panel_setup_backlight(connector
, pipe
);
/*
 * intel_dp_init_connector - create and register the DRM connector for a
 * DP/eDP digital port: select per-platform AUX vfuncs, init the
 * connector, assign the hotplug pin, init AUX and (optionally) MST, and
 * run eDP panel setup.  On eDP failure the connector is torn down again
 * (the unwind code at the bottom).
 *
 * NOTE(review): garbled extraction -- statements split across lines,
 * some lines missing (declarations of type/ret, braces, returns, switch
 * labels for the hpd_pin assignments).  Code is byte-identical; only
 * comments are added.
 */
5871 intel_dp_init_connector(struct intel_digital_port
*intel_dig_port
,
5872 struct intel_connector
*intel_connector
)
5874 struct drm_connector
*connector
= &intel_connector
->base
;
5875 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5876 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5877 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5878 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5879 enum port port
= intel_dig_port
->port
;
/* No power-sequencer pipe claimed yet. */
5882 intel_dp
->pps_pipe
= INVALID_PIPE
;
5884 /* intel_dp vfuncs */
/* Pick the AUX clock divider callback for this platform generation. */
5885 if (INTEL_INFO(dev
)->gen
>= 9)
5886 intel_dp
->get_aux_clock_divider
= skl_get_aux_clock_divider
;
5887 else if (IS_VALLEYVIEW(dev
))
5888 intel_dp
->get_aux_clock_divider
= vlv_get_aux_clock_divider
;
5889 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
5890 intel_dp
->get_aux_clock_divider
= hsw_get_aux_clock_divider
;
5891 else if (HAS_PCH_SPLIT(dev
))
5892 intel_dp
->get_aux_clock_divider
= ilk_get_aux_clock_divider
;
5894 intel_dp
->get_aux_clock_divider
= i9xx_get_aux_clock_divider
;
/* AUX send-control register layout differs on Gen9+ (SKL). */
5896 if (INTEL_INFO(dev
)->gen
>= 9)
5897 intel_dp
->get_aux_send_ctl
= skl_get_aux_send_ctl
;
5899 intel_dp
->get_aux_send_ctl
= i9xx_get_aux_send_ctl
;
5902 intel_dp
->prepare_link_retrain
= intel_ddi_prepare_link_retrain
;
5904 /* Preserve the current hw state. */
5905 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
);
5906 intel_dp
->attached_connector
= intel_connector
;
/* eDP vs external DP determines the connector type exposed to userspace. */
5908 if (intel_dp_is_edp(dev
, port
))
5909 type
= DRM_MODE_CONNECTOR_eDP
;
5911 type
= DRM_MODE_CONNECTOR_DisplayPort
;
5914 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5915 * for DP the encoder type can be set by the caller to
5916 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5918 if (type
== DRM_MODE_CONNECTOR_eDP
)
5919 intel_encoder
->type
= INTEL_OUTPUT_EDP
;
5921 /* eDP only on port B and/or C on vlv/chv */
5922 if (WARN_ON(IS_VALLEYVIEW(dev
) && is_edp(intel_dp
) &&
5923 port
!= PORT_B
&& port
!= PORT_C
))
5926 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5927 type
== DRM_MODE_CONNECTOR_eDP
? "eDP" : "DP",
/* Register the connector object and its probe helpers with DRM core. */
5930 drm_connector_init(dev
, connector
, &intel_dp_connector_funcs
, type
);
5931 drm_connector_helper_add(connector
, &intel_dp_connector_helper_funcs
);
5933 connector
->interlace_allowed
= true;
5934 connector
->doublescan_allowed
= 0;
/* Deferred VDD-off work used by the eDP panel power sequencing code. */
5936 INIT_DELAYED_WORK(&intel_dp
->panel_vdd_work
,
5937 edp_panel_vdd_work
);
5939 intel_connector_attach_encoder(intel_connector
, intel_encoder
);
5940 drm_connector_register(connector
);
/* DDI platforms read connector state differently from pre-DDI ones. */
5943 intel_connector
->get_hw_state
= intel_ddi_connector_get_hw_state
;
5945 intel_connector
->get_hw_state
= intel_connector_get_hw_state
;
5946 intel_connector
->unregister
= intel_dp_connector_unregister
;
5948 /* Set up the hotplug pin. */
/* NOTE(review): the per-port switch labels were lost in extraction;
 * these assignments map port A..E to HPD_PORT_A..E, with the BXT A1
 * workaround routing one port's HPD to pin A. */
5951 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5954 intel_encoder
->hpd_pin
= HPD_PORT_B
;
5955 if (IS_BXT_REVID(dev
, 0, BXT_REVID_A1
))
5956 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5959 intel_encoder
->hpd_pin
= HPD_PORT_C
;
5962 intel_encoder
->hpd_pin
= HPD_PORT_D
;
5965 intel_encoder
->hpd_pin
= HPD_PORT_E
;
/* eDP panels need their power-sequencer timings set up (under pps lock). */
5971 if (is_edp(intel_dp
)) {
5973 intel_dp_init_panel_power_timestamps(intel_dp
);
5974 if (IS_VALLEYVIEW(dev
))
5975 vlv_initial_power_sequencer_setup(intel_dp
);
5977 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
5978 pps_unlock(intel_dp
);
5981 ret
= intel_dp_aux_init(intel_dp
, intel_connector
);
5985 /* init MST on ports that can support it */
5986 if (HAS_DP_MST(dev
) &&
5987 (port
== PORT_B
|| port
== PORT_C
|| port
== PORT_D
))
5988 intel_dp_mst_encoder_init(intel_dig_port
,
5989 intel_connector
->base
.base
.id
);
/* eDP panel probe failed -> unwind AUX/MST init (failure path below). */
5991 if (!intel_edp_init_connector(intel_dp
, intel_connector
)) {
5992 intel_dp_aux_fini(intel_dp
);
5993 intel_dp_mst_encoder_cleanup(intel_dig_port
);
5997 intel_dp_add_properties(intel_dp
, connector
);
5999 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6000 * 0xd. Failure to do so will result in spurious interrupts being
6001 * generated on the port when a cable is not attached.
6003 if (IS_G4X(dev
) && !IS_GM45(dev
)) {
6004 u32 temp
= I915_READ(PEG_BAND_GAP_DATA
);
6005 I915_WRITE(PEG_BAND_GAP_DATA
, (temp
& ~0xf) | 0xd);
6008 i915_debugfs_connector_add(connector
);
/* Error unwind: cancel VDD work, force VDD off, drop the connector. */
6013 if (is_edp(intel_dp
)) {
6014 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
6016 * vdd might still be enabled due to the delayed vdd off.
6017 * Make sure vdd is actually turned off here.
6020 edp_panel_vdd_off_sync(intel_dp
);
6021 pps_unlock(intel_dp
);
6023 drm_connector_unregister(connector
);
6024 drm_connector_cleanup(connector
);
/*
 * intel_dp_init - allocate and register a DP encoder + connector for the
 * given output register and port: allocates the digital port and
 * connector, installs per-platform enable/disable hooks, and finishes
 * via intel_dp_init_connector(); on failure the allocations are unwound
 * through the err_* labels at the bottom.
 *
 * NOTE(review): garbled extraction -- statements split across lines,
 * some lines missing (braces, returns, the err_init_connector label).
 * Code is byte-identical; only comments are added.
 */
6030 intel_dp_init(struct drm_device
*dev
, int output_reg
, enum port port
)
6032 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6033 struct intel_digital_port
*intel_dig_port
;
6034 struct intel_encoder
*intel_encoder
;
6035 struct drm_encoder
*encoder
;
6036 struct intel_connector
*intel_connector
;
/* Allocate the digital port wrapper and the connector. */
6038 intel_dig_port
= kzalloc(sizeof(*intel_dig_port
), GFP_KERNEL
);
6039 if (!intel_dig_port
)
6042 intel_connector
= intel_connector_alloc();
6043 if (!intel_connector
)
6044 goto err_connector_alloc
;
6046 intel_encoder
= &intel_dig_port
->base
;
6047 encoder
= &intel_encoder
->base
;
6049 drm_encoder_init(dev
, &intel_encoder
->base
, &intel_dp_enc_funcs
,
6050 DRM_MODE_ENCODER_TMDS
);
/* Common encoder hooks, then platform-specific enable/disable paths. */
6052 intel_encoder
->compute_config
= intel_dp_compute_config
;
6053 intel_encoder
->disable
= intel_disable_dp
;
6054 intel_encoder
->get_hw_state
= intel_dp_get_hw_state
;
6055 intel_encoder
->get_config
= intel_dp_get_config
;
6056 intel_encoder
->suspend
= intel_dp_encoder_suspend
;
6057 if (IS_CHERRYVIEW(dev
)) {
6058 intel_encoder
->pre_pll_enable
= chv_dp_pre_pll_enable
;
6059 intel_encoder
->pre_enable
= chv_pre_enable_dp
;
6060 intel_encoder
->enable
= vlv_enable_dp
;
6061 intel_encoder
->post_disable
= chv_post_disable_dp
;
6062 intel_encoder
->post_pll_disable
= chv_dp_post_pll_disable
;
6063 } else if (IS_VALLEYVIEW(dev
)) {
6064 intel_encoder
->pre_pll_enable
= vlv_dp_pre_pll_enable
;
6065 intel_encoder
->pre_enable
= vlv_pre_enable_dp
;
6066 intel_encoder
->enable
= vlv_enable_dp
;
6067 intel_encoder
->post_disable
= vlv_post_disable_dp
;
/* Fallback path for pre-VLV platforms (g4x and PCH-split). */
6069 intel_encoder
->pre_enable
= g4x_pre_enable_dp
;
6070 intel_encoder
->enable
= g4x_enable_dp
;
6071 if (INTEL_INFO(dev
)->gen
>= 5)
6072 intel_encoder
->post_disable
= ilk_post_disable_dp
;
6075 intel_dig_port
->port
= port
;
6076 intel_dig_port
->dp
.output_reg
= output_reg
;
6078 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
/* CHV: port D lives on pipe C only; others on pipes A/B (or A/B/C). */
6079 if (IS_CHERRYVIEW(dev
)) {
6081 intel_encoder
->crtc_mask
= 1 << 2;
6083 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1);
6085 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1) | (1 << 2);
6087 intel_encoder
->cloneable
= 0;
/* Route long/short HPD pulses for this port to the DP handler. */
6089 intel_dig_port
->hpd_pulse
= intel_dp_hpd_pulse
;
6090 dev_priv
->hotplug
.irq_port
[port
] = intel_dig_port
;
6092 if (!intel_dp_init_connector(intel_dig_port
, intel_connector
))
6093 goto err_init_connector
;
/* Error unwind: drop the encoder, then free both allocations. */
6098 drm_encoder_cleanup(encoder
);
6099 kfree(intel_connector
);
6100 err_connector_alloc
:
6101 kfree(intel_dig_port
);
/*
 * intel_dp_mst_suspend - walk every hotplug IRQ port and suspend the DP
 * MST topology manager on each MST-capable DP port that is currently in
 * MST mode.  Called from the driver suspend path.
 *
 * NOTE(review): garbled extraction -- the declaration of 'i' and
 * brace/continue lines are missing.  Code is byte-identical; only
 * comments are added.
 */
6106 void intel_dp_mst_suspend(struct drm_device
*dev
)
6108 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
/* Scan all possible ports; slots without a digital port are skipped. */
6112 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6113 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6114 if (!intel_dig_port
)
6117 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6118 if (!intel_dig_port
->dp
.can_mst
)
/* Only ports actively running MST need their topology suspended. */
6120 if (intel_dig_port
->dp
.is_mst
)
6121 drm_dp_mst_topology_mgr_suspend(&intel_dig_port
->dp
.mst_mgr
);
/*
 * intel_dp_mst_resume - walk every hotplug IRQ port and resume the DP
 * MST topology manager on each MST-capable DP port.  Counterpart of
 * intel_dp_mst_suspend(), called from the driver resume path.
 *
 * NOTE(review): garbled extraction; this function reaches the end of the
 * visible chunk, so its tail (handling of 'ret' after the resume call)
 * may be cut off.  Code is byte-identical; only comments are added.
 */
6126 void intel_dp_mst_resume(struct drm_device
*dev
)
6128 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
/* Scan all possible ports; slots without a digital port are skipped. */
6131 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6132 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6133 if (!intel_dig_port
)
6135 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6138 if (!intel_dig_port
->dp
.can_mst
)
/* Re-sync the MST topology; check status afterwards. */
6141 ret
= drm_dp_mst_topology_mgr_resume(&intel_dig_port
->dp
.mst_mgr
);
6143 intel_dp_check_mst_status(&intel_dig_port
->dp
);