]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/intel_dp.c
drm/i915: remove HBR2 from chv supported list
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf
CML
50struct dp_link_dpll {
51 int link_bw;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { DP_LINK_BW_1_62,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { DP_LINK_BW_2_7,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { DP_LINK_BW_1_62,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { DP_LINK_BW_2_7,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5
CML
69static const struct dp_link_dpll vlv_dpll[] = {
70 { DP_LINK_BW_1_62,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
72 { DP_LINK_BW_2_7,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
85 */
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63
SJ
93
94static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15
VS
95 324000, 432000, 540000 };
96static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 97
cfcb0fc9
JB
98/**
99 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100 * @intel_dp: DP struct
101 *
102 * If a CPU or PCH DP output is attached to an eDP panel, this function
103 * will return true, and false otherwise.
104 */
105static bool is_edp(struct intel_dp *intel_dp)
106{
da63a9f2
PZ
107 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
108
109 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
110}
111
68b4d824 112static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 113{
68b4d824
ID
114 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
115
116 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
117}
118
df0e9248
CW
119static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
120{
fa90ecef 121 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
122}
123
ea5b213a 124static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 125static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 126static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 127static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
128static void vlv_steal_power_sequencer(struct drm_device *dev,
129 enum pipe pipe);
a4fc5ed6 130
ed4e9c1d
VS
131static int
132intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 133{
7183dc29 134 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
135
136 switch (max_link_bw) {
137 case DP_LINK_BW_1_62:
138 case DP_LINK_BW_2_7:
1db10e28 139 case DP_LINK_BW_5_4:
d4eead50 140 break;
a4fc5ed6 141 default:
d4eead50
ID
142 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
143 max_link_bw);
a4fc5ed6
KP
144 max_link_bw = DP_LINK_BW_1_62;
145 break;
146 }
147 return max_link_bw;
148}
149
eeb6324d
PZ
150static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
151{
152 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
153 struct drm_device *dev = intel_dig_port->base.base.dev;
154 u8 source_max, sink_max;
155
156 source_max = 4;
157 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
158 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
159 source_max = 2;
160
161 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
162
163 return min(source_max, sink_max);
164}
165
cd9dde44
AJ
166/*
167 * The units on the numbers in the next two are... bizarre. Examples will
168 * make it clearer; this one parallels an example in the eDP spec.
169 *
170 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
171 *
172 * 270000 * 1 * 8 / 10 == 216000
173 *
174 * The actual data capacity of that configuration is 2.16Gbit/s, so the
175 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
176 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
177 * 119000. At 18bpp that's 2142000 kilobits per second.
178 *
179 * Thus the strange-looking division by 10 in intel_dp_link_required, to
180 * get the result in decakilobits instead of kilobits.
181 */
182
a4fc5ed6 183static int
c898261c 184intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 185{
cd9dde44 186 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
187}
188
fe27d53e
DA
189static int
190intel_dp_max_data_rate(int max_link_clock, int max_lanes)
191{
192 return (max_link_clock * max_lanes * 8) / 10;
193}
194
c19de8eb 195static enum drm_mode_status
a4fc5ed6
KP
196intel_dp_mode_valid(struct drm_connector *connector,
197 struct drm_display_mode *mode)
198{
df0e9248 199 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
200 struct intel_connector *intel_connector = to_intel_connector(connector);
201 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
202 int target_clock = mode->clock;
203 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 204
dd06f90e
JN
205 if (is_edp(intel_dp) && fixed_mode) {
206 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
207 return MODE_PANEL;
208
dd06f90e 209 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 210 return MODE_PANEL;
03afc4a2
DV
211
212 target_clock = fixed_mode->clock;
7de56f43
ZY
213 }
214
50fec21a 215 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 216 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
217
218 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
219 mode_rate = intel_dp_link_required(target_clock, 18);
220
221 if (mode_rate > max_rate)
c4867936 222 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
223
224 if (mode->clock < 10000)
225 return MODE_CLOCK_LOW;
226
0af78a2b
DV
227 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
228 return MODE_H_ILLEGAL;
229
a4fc5ed6
KP
230 return MODE_OK;
231}
232
a4f1289e 233uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
234{
235 int i;
236 uint32_t v = 0;
237
238 if (src_bytes > 4)
239 src_bytes = 4;
240 for (i = 0; i < src_bytes; i++)
241 v |= ((uint32_t) src[i]) << ((3-i) * 8);
242 return v;
243}
244
c2af70e2 245static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
246{
247 int i;
248 if (dst_bytes > 4)
249 dst_bytes = 4;
250 for (i = 0; i < dst_bytes; i++)
251 dst[i] = src >> ((3-i) * 8);
252}
253
fb0f8fbf
KP
254/* hrawclock is 1/4 the FSB frequency */
255static int
256intel_hrawclk(struct drm_device *dev)
257{
258 struct drm_i915_private *dev_priv = dev->dev_private;
259 uint32_t clkcfg;
260
9473c8f4
VP
261 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
262 if (IS_VALLEYVIEW(dev))
263 return 200;
264
fb0f8fbf
KP
265 clkcfg = I915_READ(CLKCFG);
266 switch (clkcfg & CLKCFG_FSB_MASK) {
267 case CLKCFG_FSB_400:
268 return 100;
269 case CLKCFG_FSB_533:
270 return 133;
271 case CLKCFG_FSB_667:
272 return 166;
273 case CLKCFG_FSB_800:
274 return 200;
275 case CLKCFG_FSB_1067:
276 return 266;
277 case CLKCFG_FSB_1333:
278 return 333;
279 /* these two are just a guess; one of them might be right */
280 case CLKCFG_FSB_1600:
281 case CLKCFG_FSB_1600_ALT:
282 return 400;
283 default:
284 return 133;
285 }
286}
287
bf13e81b
JN
288static void
289intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 290 struct intel_dp *intel_dp);
bf13e81b
JN
291static void
292intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 293 struct intel_dp *intel_dp);
bf13e81b 294
773538e8
VS
295static void pps_lock(struct intel_dp *intel_dp)
296{
297 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
298 struct intel_encoder *encoder = &intel_dig_port->base;
299 struct drm_device *dev = encoder->base.dev;
300 struct drm_i915_private *dev_priv = dev->dev_private;
301 enum intel_display_power_domain power_domain;
302
303 /*
304 * See vlv_power_sequencer_reset() why we need
305 * a power domain reference here.
306 */
307 power_domain = intel_display_port_power_domain(encoder);
308 intel_display_power_get(dev_priv, power_domain);
309
310 mutex_lock(&dev_priv->pps_mutex);
311}
312
313static void pps_unlock(struct intel_dp *intel_dp)
314{
315 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
316 struct intel_encoder *encoder = &intel_dig_port->base;
317 struct drm_device *dev = encoder->base.dev;
318 struct drm_i915_private *dev_priv = dev->dev_private;
319 enum intel_display_power_domain power_domain;
320
321 mutex_unlock(&dev_priv->pps_mutex);
322
323 power_domain = intel_display_port_power_domain(encoder);
324 intel_display_power_put(dev_priv, power_domain);
325}
326
961a0db0
VS
327static void
328vlv_power_sequencer_kick(struct intel_dp *intel_dp)
329{
330 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
331 struct drm_device *dev = intel_dig_port->base.base.dev;
332 struct drm_i915_private *dev_priv = dev->dev_private;
333 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 334 bool pll_enabled;
961a0db0
VS
335 uint32_t DP;
336
337 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
338 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
339 pipe_name(pipe), port_name(intel_dig_port->port)))
340 return;
341
342 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
343 pipe_name(pipe), port_name(intel_dig_port->port));
344
345 /* Preserve the BIOS-computed detected bit. This is
346 * supposed to be read-only.
347 */
348 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
349 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
350 DP |= DP_PORT_WIDTH(1);
351 DP |= DP_LINK_TRAIN_PAT_1;
352
353 if (IS_CHERRYVIEW(dev))
354 DP |= DP_PIPE_SELECT_CHV(pipe);
355 else if (pipe == PIPE_B)
356 DP |= DP_PIPEB_SELECT;
357
d288f65f
VS
358 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
359
360 /*
361 * The DPLL for the pipe must be enabled for this to work.
362 * So enable temporarily it if it's not already enabled.
363 */
364 if (!pll_enabled)
365 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
366 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
367
961a0db0
VS
368 /*
369 * Similar magic as in intel_dp_enable_port().
370 * We _must_ do this port enable + disable trick
371 * to make this power seqeuencer lock onto the port.
372 * Otherwise even VDD force bit won't work.
373 */
374 I915_WRITE(intel_dp->output_reg, DP);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
379
380 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
381 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
382
383 if (!pll_enabled)
384 vlv_force_pll_off(dev, pipe);
961a0db0
VS
385}
386
bf13e81b
JN
387static enum pipe
388vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
389{
390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
391 struct drm_device *dev = intel_dig_port->base.base.dev;
392 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
393 struct intel_encoder *encoder;
394 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 395 enum pipe pipe;
bf13e81b 396
e39b999a 397 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 398
a8c3344e
VS
399 /* We should never land here with regular DP ports */
400 WARN_ON(!is_edp(intel_dp));
401
a4a5d2f8
VS
402 if (intel_dp->pps_pipe != INVALID_PIPE)
403 return intel_dp->pps_pipe;
404
405 /*
406 * We don't have power sequencer currently.
407 * Pick one that's not used by other ports.
408 */
409 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
410 base.head) {
411 struct intel_dp *tmp;
412
413 if (encoder->type != INTEL_OUTPUT_EDP)
414 continue;
415
416 tmp = enc_to_intel_dp(&encoder->base);
417
418 if (tmp->pps_pipe != INVALID_PIPE)
419 pipes &= ~(1 << tmp->pps_pipe);
420 }
421
422 /*
423 * Didn't find one. This should not happen since there
424 * are two power sequencers and up to two eDP ports.
425 */
426 if (WARN_ON(pipes == 0))
a8c3344e
VS
427 pipe = PIPE_A;
428 else
429 pipe = ffs(pipes) - 1;
a4a5d2f8 430
a8c3344e
VS
431 vlv_steal_power_sequencer(dev, pipe);
432 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
433
434 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
435 pipe_name(intel_dp->pps_pipe),
436 port_name(intel_dig_port->port));
437
438 /* init power sequencer on this pipe and port */
36b5f425
VS
439 intel_dp_init_panel_power_sequencer(dev, intel_dp);
440 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 441
961a0db0
VS
442 /*
443 * Even vdd force doesn't work until we've made
444 * the power sequencer lock in on the port.
445 */
446 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
447
448 return intel_dp->pps_pipe;
449}
450
6491ab27
VS
451typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
452 enum pipe pipe);
453
454static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
455 enum pipe pipe)
456{
457 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
458}
459
460static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
461 enum pipe pipe)
462{
463 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
464}
465
466static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
467 enum pipe pipe)
468{
469 return true;
470}
bf13e81b 471
a4a5d2f8 472static enum pipe
6491ab27
VS
473vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
474 enum port port,
475 vlv_pipe_check pipe_check)
a4a5d2f8
VS
476{
477 enum pipe pipe;
bf13e81b 478
bf13e81b
JN
479 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
480 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
481 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
482
483 if (port_sel != PANEL_PORT_SELECT_VLV(port))
484 continue;
485
6491ab27
VS
486 if (!pipe_check(dev_priv, pipe))
487 continue;
488
a4a5d2f8 489 return pipe;
bf13e81b
JN
490 }
491
a4a5d2f8
VS
492 return INVALID_PIPE;
493}
494
495static void
496vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
497{
498 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
499 struct drm_device *dev = intel_dig_port->base.base.dev;
500 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
501 enum port port = intel_dig_port->port;
502
503 lockdep_assert_held(&dev_priv->pps_mutex);
504
505 /* try to find a pipe with this port selected */
6491ab27
VS
506 /* first pick one where the panel is on */
507 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
508 vlv_pipe_has_pp_on);
509 /* didn't find one? pick one where vdd is on */
510 if (intel_dp->pps_pipe == INVALID_PIPE)
511 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
512 vlv_pipe_has_vdd_on);
513 /* didn't find one? pick one with just the correct port */
514 if (intel_dp->pps_pipe == INVALID_PIPE)
515 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
516 vlv_pipe_any);
a4a5d2f8
VS
517
518 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
519 if (intel_dp->pps_pipe == INVALID_PIPE) {
520 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
521 port_name(port));
522 return;
bf13e81b
JN
523 }
524
a4a5d2f8
VS
525 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
526 port_name(port), pipe_name(intel_dp->pps_pipe));
527
36b5f425
VS
528 intel_dp_init_panel_power_sequencer(dev, intel_dp);
529 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
530}
531
773538e8
VS
532void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
533{
534 struct drm_device *dev = dev_priv->dev;
535 struct intel_encoder *encoder;
536
537 if (WARN_ON(!IS_VALLEYVIEW(dev)))
538 return;
539
540 /*
541 * We can't grab pps_mutex here due to deadlock with power_domain
542 * mutex when power_domain functions are called while holding pps_mutex.
543 * That also means that in order to use pps_pipe the code needs to
544 * hold both a power domain reference and pps_mutex, and the power domain
545 * reference get/put must be done while _not_ holding pps_mutex.
546 * pps_{lock,unlock}() do these steps in the correct order, so one
547 * should use them always.
548 */
549
550 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
551 struct intel_dp *intel_dp;
552
553 if (encoder->type != INTEL_OUTPUT_EDP)
554 continue;
555
556 intel_dp = enc_to_intel_dp(&encoder->base);
557 intel_dp->pps_pipe = INVALID_PIPE;
558 }
bf13e81b
JN
559}
560
561static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
562{
563 struct drm_device *dev = intel_dp_to_dev(intel_dp);
564
565 if (HAS_PCH_SPLIT(dev))
566 return PCH_PP_CONTROL;
567 else
568 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
569}
570
571static u32 _pp_stat_reg(struct intel_dp *intel_dp)
572{
573 struct drm_device *dev = intel_dp_to_dev(intel_dp);
574
575 if (HAS_PCH_SPLIT(dev))
576 return PCH_PP_STATUS;
577 else
578 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
579}
580
01527b31
CT
581/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
582 This function only applicable when panel PM state is not to be tracked */
583static int edp_notify_handler(struct notifier_block *this, unsigned long code,
584 void *unused)
585{
586 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
587 edp_notifier);
588 struct drm_device *dev = intel_dp_to_dev(intel_dp);
589 struct drm_i915_private *dev_priv = dev->dev_private;
590 u32 pp_div;
591 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
592
593 if (!is_edp(intel_dp) || code != SYS_RESTART)
594 return 0;
595
773538e8 596 pps_lock(intel_dp);
e39b999a 597
01527b31 598 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
599 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
600
01527b31
CT
601 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
602 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
603 pp_div = I915_READ(pp_div_reg);
604 pp_div &= PP_REFERENCE_DIVIDER_MASK;
605
606 /* 0x1F write to PP_DIV_REG sets max cycle delay */
607 I915_WRITE(pp_div_reg, pp_div | 0x1F);
608 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
609 msleep(intel_dp->panel_power_cycle_delay);
610 }
611
773538e8 612 pps_unlock(intel_dp);
e39b999a 613
01527b31
CT
614 return 0;
615}
616
4be73780 617static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 618{
30add22d 619 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
620 struct drm_i915_private *dev_priv = dev->dev_private;
621
e39b999a
VS
622 lockdep_assert_held(&dev_priv->pps_mutex);
623
9a42356b
VS
624 if (IS_VALLEYVIEW(dev) &&
625 intel_dp->pps_pipe == INVALID_PIPE)
626 return false;
627
bf13e81b 628 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
629}
630
4be73780 631static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 632{
30add22d 633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
634 struct drm_i915_private *dev_priv = dev->dev_private;
635
e39b999a
VS
636 lockdep_assert_held(&dev_priv->pps_mutex);
637
9a42356b
VS
638 if (IS_VALLEYVIEW(dev) &&
639 intel_dp->pps_pipe == INVALID_PIPE)
640 return false;
641
773538e8 642 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
643}
644
9b984dae
KP
645static void
646intel_dp_check_edp(struct intel_dp *intel_dp)
647{
30add22d 648 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 649 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 650
9b984dae
KP
651 if (!is_edp(intel_dp))
652 return;
453c5420 653
4be73780 654 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
655 WARN(1, "eDP powered off while attempting aux channel communication.\n");
656 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
657 I915_READ(_pp_stat_reg(intel_dp)),
658 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
659 }
660}
661
9ee32fea
DV
662static uint32_t
663intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
664{
665 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
666 struct drm_device *dev = intel_dig_port->base.base.dev;
667 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 668 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
669 uint32_t status;
670 bool done;
671
ef04f00d 672#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 673 if (has_aux_irq)
b18ac466 674 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 675 msecs_to_jiffies_timeout(10));
9ee32fea
DV
676 else
677 done = wait_for_atomic(C, 10) == 0;
678 if (!done)
679 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
680 has_aux_irq);
681#undef C
682
683 return status;
684}
685
ec5b01dd 686static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 687{
174edf1f
PZ
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 690
ec5b01dd
DL
691 /*
692 * The clock divider is based off the hrawclk, and would like to run at
693 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 694 */
ec5b01dd
DL
695 return index ? 0 : intel_hrawclk(dev) / 2;
696}
697
698static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
699{
700 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 702 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
703
704 if (index)
705 return 0;
706
707 if (intel_dig_port->port == PORT_A) {
469d4b2a 708 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
709 } else {
710 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
711 }
712}
713
714static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715{
716 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
717 struct drm_device *dev = intel_dig_port->base.base.dev;
718 struct drm_i915_private *dev_priv = dev->dev_private;
719
720 if (intel_dig_port->port == PORT_A) {
721 if (index)
722 return 0;
1652d19e 723 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
724 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
725 /* Workaround for non-ULT HSW */
bc86625a
CW
726 switch (index) {
727 case 0: return 63;
728 case 1: return 72;
729 default: return 0;
730 }
ec5b01dd 731 } else {
bc86625a 732 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 733 }
b84a1cf8
RV
734}
735
ec5b01dd
DL
736static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
737{
738 return index ? 0 : 100;
739}
740
b6b5e383
DL
741static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
742{
743 /*
744 * SKL doesn't need us to program the AUX clock divider (Hardware will
745 * derive the clock from CDCLK automatically). We still implement the
746 * get_aux_clock_divider vfunc to plug-in into the existing code.
747 */
748 return index ? 0 : 1;
749}
750
5ed12a19
DL
751static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
752 bool has_aux_irq,
753 int send_bytes,
754 uint32_t aux_clock_divider)
755{
756 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
757 struct drm_device *dev = intel_dig_port->base.base.dev;
758 uint32_t precharge, timeout;
759
760 if (IS_GEN6(dev))
761 precharge = 3;
762 else
763 precharge = 5;
764
765 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
766 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
767 else
768 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
769
770 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 771 DP_AUX_CH_CTL_DONE |
5ed12a19 772 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 773 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 774 timeout |
788d4433 775 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
776 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
777 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 778 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
779}
780
b9ca5fad
DL
781static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
782 bool has_aux_irq,
783 int send_bytes,
784 uint32_t unused)
785{
786 return DP_AUX_CH_CTL_SEND_BUSY |
787 DP_AUX_CH_CTL_DONE |
788 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
789 DP_AUX_CH_CTL_TIME_OUT_ERROR |
790 DP_AUX_CH_CTL_TIME_OUT_1600us |
791 DP_AUX_CH_CTL_RECEIVE_ERROR |
792 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
793 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
794}
795
b84a1cf8
RV
796static int
797intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 798 const uint8_t *send, int send_bytes,
b84a1cf8
RV
799 uint8_t *recv, int recv_size)
800{
801 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
802 struct drm_device *dev = intel_dig_port->base.base.dev;
803 struct drm_i915_private *dev_priv = dev->dev_private;
804 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
805 uint32_t ch_data = ch_ctl + 4;
bc86625a 806 uint32_t aux_clock_divider;
b84a1cf8
RV
807 int i, ret, recv_bytes;
808 uint32_t status;
5ed12a19 809 int try, clock = 0;
4e6b788c 810 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
811 bool vdd;
812
773538e8 813 pps_lock(intel_dp);
e39b999a 814
72c3500a
VS
815 /*
816 * We will be called with VDD already enabled for dpcd/edid/oui reads.
817 * In such cases we want to leave VDD enabled and it's up to upper layers
818 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
819 * ourselves.
820 */
1e0560e0 821 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
822
823 /* dp aux is extremely sensitive to irq latency, hence request the
824 * lowest possible wakeup latency and so prevent the cpu from going into
825 * deep sleep states.
826 */
827 pm_qos_update_request(&dev_priv->pm_qos, 0);
828
829 intel_dp_check_edp(intel_dp);
5eb08b69 830
c67a470b
PZ
831 intel_aux_display_runtime_get(dev_priv);
832
11bee43e
JB
833 /* Try to wait for any previous AUX channel activity */
834 for (try = 0; try < 3; try++) {
ef04f00d 835 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
836 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
837 break;
838 msleep(1);
839 }
840
841 if (try == 3) {
842 WARN(1, "dp_aux_ch not started status 0x%08x\n",
843 I915_READ(ch_ctl));
9ee32fea
DV
844 ret = -EBUSY;
845 goto out;
4f7f7b7e
CW
846 }
847
46a5ae9f
PZ
848 /* Only 5 data registers! */
849 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
850 ret = -E2BIG;
851 goto out;
852 }
853
ec5b01dd 854 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
855 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
856 has_aux_irq,
857 send_bytes,
858 aux_clock_divider);
5ed12a19 859
bc86625a
CW
860 /* Must try at least 3 times according to DP spec */
861 for (try = 0; try < 5; try++) {
862 /* Load the send data into the aux channel data registers */
863 for (i = 0; i < send_bytes; i += 4)
864 I915_WRITE(ch_data + i,
a4f1289e
RV
865 intel_dp_pack_aux(send + i,
866 send_bytes - i));
bc86625a
CW
867
868 /* Send the command and wait for it to complete */
5ed12a19 869 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
870
871 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
872
873 /* Clear done status and any errors */
874 I915_WRITE(ch_ctl,
875 status |
876 DP_AUX_CH_CTL_DONE |
877 DP_AUX_CH_CTL_TIME_OUT_ERROR |
878 DP_AUX_CH_CTL_RECEIVE_ERROR);
879
74ebf294 880 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 881 continue;
74ebf294
TP
882
883 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
884 * 400us delay required for errors and timeouts
885 * Timeout errors from the HW already meet this
886 * requirement so skip to next iteration
887 */
888 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
889 usleep_range(400, 500);
bc86625a 890 continue;
74ebf294 891 }
bc86625a 892 if (status & DP_AUX_CH_CTL_DONE)
e058c945 893 goto done;
bc86625a 894 }
a4fc5ed6
KP
895 }
896
a4fc5ed6 897 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 898 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
899 ret = -EBUSY;
900 goto out;
a4fc5ed6
KP
901 }
902
e058c945 903done:
a4fc5ed6
KP
904 /* Check for timeout or receive error.
905 * Timeouts occur when the sink is not connected
906 */
a5b3da54 907 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 908 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
909 ret = -EIO;
910 goto out;
a5b3da54 911 }
1ae8c0a5
KP
912
913 /* Timeouts occur when the device isn't connected, so they're
914 * "normal" -- don't fill the kernel log with these */
a5b3da54 915 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 916 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
917 ret = -ETIMEDOUT;
918 goto out;
a4fc5ed6
KP
919 }
920
921 /* Unload any bytes sent back from the other side */
922 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
923 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
924 if (recv_bytes > recv_size)
925 recv_bytes = recv_size;
0206e353 926
4f7f7b7e 927 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
928 intel_dp_unpack_aux(I915_READ(ch_data + i),
929 recv + i, recv_bytes - i);
a4fc5ed6 930
9ee32fea
DV
931 ret = recv_bytes;
932out:
933 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 934 intel_aux_display_runtime_put(dev_priv);
9ee32fea 935
884f19e9
JN
936 if (vdd)
937 edp_panel_vdd_off(intel_dp, false);
938
773538e8 939 pps_unlock(intel_dp);
e39b999a 940
9ee32fea 941 return ret;
a4fc5ed6
KP
942}
943
a6c8aff0
JN
944#define BARE_ADDRESS_SIZE 3
945#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
946static ssize_t
947intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 948{
9d1a1031
JN
949 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
950 uint8_t txbuf[20], rxbuf[20];
951 size_t txsize, rxsize;
a4fc5ed6 952 int ret;
a4fc5ed6 953
d2d9cbbd
VS
954 txbuf[0] = (msg->request << 4) |
955 ((msg->address >> 16) & 0xf);
956 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
957 txbuf[2] = msg->address & 0xff;
958 txbuf[3] = msg->size - 1;
46a5ae9f 959
9d1a1031
JN
960 switch (msg->request & ~DP_AUX_I2C_MOT) {
961 case DP_AUX_NATIVE_WRITE:
962 case DP_AUX_I2C_WRITE:
a6c8aff0 963 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 964 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 965
9d1a1031
JN
966 if (WARN_ON(txsize > 20))
967 return -E2BIG;
a4fc5ed6 968
9d1a1031 969 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 970
9d1a1031
JN
971 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
972 if (ret > 0) {
973 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 974
a1ddefd8
JN
975 if (ret > 1) {
976 /* Number of bytes written in a short write. */
977 ret = clamp_t(int, rxbuf[1], 0, msg->size);
978 } else {
979 /* Return payload size. */
980 ret = msg->size;
981 }
9d1a1031
JN
982 }
983 break;
46a5ae9f 984
9d1a1031
JN
985 case DP_AUX_NATIVE_READ:
986 case DP_AUX_I2C_READ:
a6c8aff0 987 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 988 rxsize = msg->size + 1;
a4fc5ed6 989
9d1a1031
JN
990 if (WARN_ON(rxsize > 20))
991 return -E2BIG;
a4fc5ed6 992
9d1a1031
JN
993 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
994 if (ret > 0) {
995 msg->reply = rxbuf[0] >> 4;
996 /*
997 * Assume happy day, and copy the data. The caller is
998 * expected to check msg->reply before touching it.
999 *
1000 * Return payload size.
1001 */
1002 ret--;
1003 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1004 }
9d1a1031
JN
1005 break;
1006
1007 default:
1008 ret = -EINVAL;
1009 break;
a4fc5ed6 1010 }
f51a44b9 1011
9d1a1031 1012 return ret;
a4fc5ed6
KP
1013}
1014
9d1a1031
JN
1015static void
1016intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1017{
1018 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1019 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1020 enum port port = intel_dig_port->port;
0b99836f 1021 const char *name = NULL;
ab2c0672
DA
1022 int ret;
1023
33ad6626
JN
1024 switch (port) {
1025 case PORT_A:
1026 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1027 name = "DPDDC-A";
ab2c0672 1028 break;
33ad6626
JN
1029 case PORT_B:
1030 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1031 name = "DPDDC-B";
ab2c0672 1032 break;
33ad6626
JN
1033 case PORT_C:
1034 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1035 name = "DPDDC-C";
ab2c0672 1036 break;
33ad6626
JN
1037 case PORT_D:
1038 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1039 name = "DPDDC-D";
33ad6626
JN
1040 break;
1041 default:
1042 BUG();
ab2c0672
DA
1043 }
1044
1b1aad75
DL
1045 /*
1046 * The AUX_CTL register is usually DP_CTL + 0x10.
1047 *
1048 * On Haswell and Broadwell though:
1049 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1050 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1051 *
1052 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1053 */
1054 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1055 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1056
0b99836f 1057 intel_dp->aux.name = name;
9d1a1031
JN
1058 intel_dp->aux.dev = dev->dev;
1059 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1060
0b99836f
JN
1061 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1062 connector->base.kdev->kobj.name);
8316f337 1063
4f71d0cb 1064 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1065 if (ret < 0) {
4f71d0cb 1066 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1067 name, ret);
1068 return;
ab2c0672 1069 }
8a5e6aeb 1070
0b99836f
JN
1071 ret = sysfs_create_link(&connector->base.kdev->kobj,
1072 &intel_dp->aux.ddc.dev.kobj,
1073 intel_dp->aux.ddc.dev.kobj.name);
1074 if (ret < 0) {
1075 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1076 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1077 }
a4fc5ed6
KP
1078}
1079
80f65de3
ID
1080static void
1081intel_dp_connector_unregister(struct intel_connector *intel_connector)
1082{
1083 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1084
0e32b39c
DA
1085 if (!intel_connector->mst_port)
1086 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1087 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1088 intel_connector_unregister(intel_connector);
1089}
1090
5416d871 1091static void
c3346ef6 1092skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1093{
1094 u32 ctrl1;
1095
dd3cd74a
ACO
1096 memset(&pipe_config->dpll_hw_state, 0,
1097 sizeof(pipe_config->dpll_hw_state));
1098
5416d871
DL
1099 pipe_config->ddi_pll_sel = SKL_DPLL0;
1100 pipe_config->dpll_hw_state.cfgcr1 = 0;
1101 pipe_config->dpll_hw_state.cfgcr2 = 0;
1102
1103 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1104 switch (link_clock / 2) {
1105 case 81000:
71cd8423 1106 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1107 SKL_DPLL0);
1108 break;
c3346ef6 1109 case 135000:
71cd8423 1110 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1111 SKL_DPLL0);
1112 break;
c3346ef6 1113 case 270000:
71cd8423 1114 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1115 SKL_DPLL0);
1116 break;
c3346ef6 1117 case 162000:
71cd8423 1118 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1119 SKL_DPLL0);
1120 break;
1121 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1122 results in CDCLK change. Need to handle the change of CDCLK by
1123 disabling pipes and re-enabling them */
1124 case 108000:
71cd8423 1125 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1126 SKL_DPLL0);
1127 break;
1128 case 216000:
71cd8423 1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1130 SKL_DPLL0);
1131 break;
1132
5416d871
DL
1133 }
1134 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1135}
1136
0e50338c 1137static void
5cec258b 1138hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c 1139{
ee46f3c7
ACO
1140 memset(&pipe_config->dpll_hw_state, 0,
1141 sizeof(pipe_config->dpll_hw_state));
1142
0e50338c
DV
1143 switch (link_bw) {
1144 case DP_LINK_BW_1_62:
1145 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1146 break;
1147 case DP_LINK_BW_2_7:
1148 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1149 break;
1150 case DP_LINK_BW_5_4:
1151 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1152 break;
1153 }
1154}
1155
fc0f8e25 1156static int
12f6a2e2 1157intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1158{
94ca719e
VS
1159 if (intel_dp->num_sink_rates) {
1160 *sink_rates = intel_dp->sink_rates;
1161 return intel_dp->num_sink_rates;
fc0f8e25 1162 }
12f6a2e2
VS
1163
1164 *sink_rates = default_rates;
1165
1166 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1167}
1168
a8f3ef61 1169static int
1db10e28 1170intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1171{
637a9c63
SJ
1172 if (IS_SKYLAKE(dev)) {
1173 *source_rates = skl_rates;
1174 return ARRAY_SIZE(skl_rates);
a8f3ef61 1175 }
636280ba
VS
1176
1177 *source_rates = default_rates;
1178
5e86dfe3 1179 /* WaDisableHBR2:skl */
1db10e28 1180 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1db10e28 1181 return (DP_LINK_BW_2_7 >> 3) + 1;
5e86dfe3
TS
1182
1183 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1184 (INTEL_INFO(dev)->gen >= 9))
1db10e28
VS
1185 return (DP_LINK_BW_5_4 >> 3) + 1;
1186 else
1187 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1188}
1189
c6bb3538
DV
1190static void
1191intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1192 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1193{
1194 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1195 const struct dp_link_dpll *divisor = NULL;
1196 int i, count = 0;
c6bb3538
DV
1197
1198 if (IS_G4X(dev)) {
9dd4ffdf
CML
1199 divisor = gen4_dpll;
1200 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1201 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1202 divisor = pch_dpll;
1203 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1204 } else if (IS_CHERRYVIEW(dev)) {
1205 divisor = chv_dpll;
1206 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1207 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1208 divisor = vlv_dpll;
1209 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1210 }
9dd4ffdf
CML
1211
1212 if (divisor && count) {
1213 for (i = 0; i < count; i++) {
1214 if (link_bw == divisor[i].link_bw) {
1215 pipe_config->dpll = divisor[i].dpll;
1216 pipe_config->clock_set = true;
1217 break;
1218 }
1219 }
c6bb3538
DV
1220 }
1221}
1222
2ecae76a
VS
1223static int intersect_rates(const int *source_rates, int source_len,
1224 const int *sink_rates, int sink_len,
94ca719e 1225 int *common_rates)
a8f3ef61
SJ
1226{
1227 int i = 0, j = 0, k = 0;
1228
a8f3ef61
SJ
1229 while (i < source_len && j < sink_len) {
1230 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1231 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1232 return k;
94ca719e 1233 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1234 ++k;
1235 ++i;
1236 ++j;
1237 } else if (source_rates[i] < sink_rates[j]) {
1238 ++i;
1239 } else {
1240 ++j;
1241 }
1242 }
1243 return k;
1244}
1245
94ca719e
VS
1246static int intel_dp_common_rates(struct intel_dp *intel_dp,
1247 int *common_rates)
2ecae76a
VS
1248{
1249 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1250 const int *source_rates, *sink_rates;
1251 int source_len, sink_len;
1252
1253 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1254 source_len = intel_dp_source_rates(dev, &source_rates);
1255
1256 return intersect_rates(source_rates, source_len,
1257 sink_rates, sink_len,
94ca719e 1258 common_rates);
2ecae76a
VS
1259}
1260
0336400e
VS
1261static void snprintf_int_array(char *str, size_t len,
1262 const int *array, int nelem)
1263{
1264 int i;
1265
1266 str[0] = '\0';
1267
1268 for (i = 0; i < nelem; i++) {
b2f505be 1269 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1270 if (r >= len)
1271 return;
1272 str += r;
1273 len -= r;
1274 }
1275}
1276
1277static void intel_dp_print_rates(struct intel_dp *intel_dp)
1278{
1279 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1280 const int *source_rates, *sink_rates;
94ca719e
VS
1281 int source_len, sink_len, common_len;
1282 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1283 char str[128]; /* FIXME: too big for stack? */
1284
1285 if ((drm_debug & DRM_UT_KMS) == 0)
1286 return;
1287
1288 source_len = intel_dp_source_rates(dev, &source_rates);
1289 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1290 DRM_DEBUG_KMS("source rates: %s\n", str);
1291
1292 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1293 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1294 DRM_DEBUG_KMS("sink rates: %s\n", str);
1295
94ca719e
VS
1296 common_len = intel_dp_common_rates(intel_dp, common_rates);
1297 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1298 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1299}
1300
f4896f15 1301static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1302{
1303 int i = 0;
1304
1305 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1306 if (find == rates[i])
1307 break;
1308
1309 return i;
1310}
1311
50fec21a
VS
1312int
1313intel_dp_max_link_rate(struct intel_dp *intel_dp)
1314{
1315 int rates[DP_MAX_SUPPORTED_RATES] = {};
1316 int len;
1317
94ca719e 1318 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1319 if (WARN_ON(len <= 0))
1320 return 162000;
1321
1322 return rates[rate_to_index(0, rates) - 1];
1323}
1324
ed4e9c1d
VS
1325int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1326{
94ca719e 1327 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1328}
1329
00c09d70 1330bool
5bfe2ac0 1331intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1332 struct intel_crtc_state *pipe_config)
a4fc5ed6 1333{
5bfe2ac0 1334 struct drm_device *dev = encoder->base.dev;
36008365 1335 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1336 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1337 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1338 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1339 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1340 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1341 int lane_count, clock;
56071a20 1342 int min_lane_count = 1;
eeb6324d 1343 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1344 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1345 int min_clock = 0;
a8f3ef61 1346 int max_clock;
083f9560 1347 int bpp, mode_rate;
ff9a6750 1348 int link_avail, link_clock;
94ca719e
VS
1349 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1350 int common_len;
a8f3ef61 1351
94ca719e 1352 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1353
1354 /* No common link rates between source and sink */
94ca719e 1355 WARN_ON(common_len <= 0);
a8f3ef61 1356
94ca719e 1357 max_clock = common_len - 1;
a4fc5ed6 1358
bc7d38a4 1359 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1360 pipe_config->has_pch_encoder = true;
1361
03afc4a2 1362 pipe_config->has_dp_encoder = true;
f769cd24 1363 pipe_config->has_drrs = false;
9fcb1704 1364 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1365
dd06f90e
JN
1366 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1367 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1368 adjusted_mode);
a1b2278e
CK
1369
1370 if (INTEL_INFO(dev)->gen >= 9) {
1371 int ret;
1372 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1373 if (ret)
1374 return ret;
1375 }
1376
2dd24552
JB
1377 if (!HAS_PCH_SPLIT(dev))
1378 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1379 intel_connector->panel.fitting_mode);
1380 else
b074cec8
JB
1381 intel_pch_panel_fitting(intel_crtc, pipe_config,
1382 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1383 }
1384
cb1793ce 1385 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1386 return false;
1387
083f9560 1388 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1389 "max bw %d pixel clock %iKHz\n",
94ca719e 1390 max_lane_count, common_rates[max_clock],
241bfc38 1391 adjusted_mode->crtc_clock);
083f9560 1392
36008365
DV
1393 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1394 * bpc in between. */
3e7ca985 1395 bpp = pipe_config->pipe_bpp;
56071a20
JN
1396 if (is_edp(intel_dp)) {
1397 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1398 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1399 dev_priv->vbt.edp_bpp);
1400 bpp = dev_priv->vbt.edp_bpp;
1401 }
1402
344c5bbc
JN
1403 /*
1404 * Use the maximum clock and number of lanes the eDP panel
1405 * advertizes being capable of. The panels are generally
1406 * designed to support only a single clock and lane
1407 * configuration, and typically these values correspond to the
1408 * native resolution of the panel.
1409 */
1410 min_lane_count = max_lane_count;
1411 min_clock = max_clock;
7984211e 1412 }
657445fe 1413
36008365 1414 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1415 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1416 bpp);
36008365 1417
c6930992 1418 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1419 for (lane_count = min_lane_count;
1420 lane_count <= max_lane_count;
1421 lane_count <<= 1) {
1422
94ca719e 1423 link_clock = common_rates[clock];
36008365
DV
1424 link_avail = intel_dp_max_data_rate(link_clock,
1425 lane_count);
1426
1427 if (mode_rate <= link_avail) {
1428 goto found;
1429 }
1430 }
1431 }
1432 }
c4867936 1433
36008365 1434 return false;
3685a8f3 1435
36008365 1436found:
55bc60db
VS
1437 if (intel_dp->color_range_auto) {
1438 /*
1439 * See:
1440 * CEA-861-E - 5.1 Default Encoding Parameters
1441 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1442 */
18316c8c 1443 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1444 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1445 else
1446 intel_dp->color_range = 0;
1447 }
1448
3685a8f3 1449 if (intel_dp->color_range)
50f3b016 1450 pipe_config->limited_color_range = true;
a4fc5ed6 1451
36008365 1452 intel_dp->lane_count = lane_count;
a8f3ef61 1453
94ca719e 1454 if (intel_dp->num_sink_rates) {
bc27b7d3 1455 intel_dp->link_bw = 0;
a8f3ef61 1456 intel_dp->rate_select =
94ca719e 1457 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1458 } else {
1459 intel_dp->link_bw =
94ca719e 1460 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1461 intel_dp->rate_select = 0;
a8f3ef61
SJ
1462 }
1463
657445fe 1464 pipe_config->pipe_bpp = bpp;
94ca719e 1465 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1466
36008365
DV
1467 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1468 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1469 pipe_config->port_clock, bpp);
36008365
DV
1470 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1471 mode_rate, link_avail);
a4fc5ed6 1472
03afc4a2 1473 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1474 adjusted_mode->crtc_clock,
1475 pipe_config->port_clock,
03afc4a2 1476 &pipe_config->dp_m_n);
9d1a455b 1477
439d7ac0 1478 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1479 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1480 pipe_config->has_drrs = true;
439d7ac0
PB
1481 intel_link_compute_m_n(bpp, lane_count,
1482 intel_connector->panel.downclock_mode->clock,
1483 pipe_config->port_clock,
1484 &pipe_config->dp_m2_n2);
1485 }
1486
5416d871 1487 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1488 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
977bb38d
S
1489 else if (IS_BROXTON(dev))
1490 /* handled in ddi */;
5416d871 1491 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1492 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1493 else
1494 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1495
03afc4a2 1496 return true;
a4fc5ed6
KP
1497}
1498
7c62a164 1499static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1500{
7c62a164
DV
1501 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1502 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1503 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1504 struct drm_i915_private *dev_priv = dev->dev_private;
1505 u32 dpa_ctl;
1506
6e3c9717
ACO
1507 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1508 crtc->config->port_clock);
ea9b6006
DV
1509 dpa_ctl = I915_READ(DP_A);
1510 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1511
6e3c9717 1512 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1513 /* For a long time we've carried around a ILK-DevA w/a for the
1514 * 160MHz clock. If we're really unlucky, it's still required.
1515 */
1516 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1517 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1518 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1519 } else {
1520 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1521 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1522 }
1ce17038 1523
ea9b6006
DV
1524 I915_WRITE(DP_A, dpa_ctl);
1525
1526 POSTING_READ(DP_A);
1527 udelay(500);
1528}
1529
8ac33ed3 1530static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1531{
b934223d 1532 struct drm_device *dev = encoder->base.dev;
417e822d 1533 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1534 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1535 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1536 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1537 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1538
417e822d 1539 /*
1a2eb460 1540 * There are four kinds of DP registers:
417e822d
KP
1541 *
1542 * IBX PCH
1a2eb460
KP
1543 * SNB CPU
1544 * IVB CPU
417e822d
KP
1545 * CPT PCH
1546 *
1547 * IBX PCH and CPU are the same for almost everything,
1548 * except that the CPU DP PLL is configured in this
1549 * register
1550 *
1551 * CPT PCH is quite different, having many bits moved
1552 * to the TRANS_DP_CTL register instead. That
1553 * configuration happens (oddly) in ironlake_pch_enable
1554 */
9c9e7927 1555
417e822d
KP
1556 /* Preserve the BIOS-computed detected bit. This is
1557 * supposed to be read-only.
1558 */
1559 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1560
417e822d 1561 /* Handle DP bits in common between all three register formats */
417e822d 1562 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1563 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1564
6e3c9717 1565 if (crtc->config->has_audio)
ea5b213a 1566 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1567
417e822d 1568 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1569
39e5fa88 1570 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1571 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1572 intel_dp->DP |= DP_SYNC_HS_HIGH;
1573 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1574 intel_dp->DP |= DP_SYNC_VS_HIGH;
1575 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1576
6aba5b6c 1577 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1578 intel_dp->DP |= DP_ENHANCED_FRAMING;
1579
7c62a164 1580 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1581 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1582 u32 trans_dp;
1583
39e5fa88 1584 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1585
1586 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1587 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1588 trans_dp |= TRANS_DP_ENH_FRAMING;
1589 else
1590 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1591 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1592 } else {
b2634017 1593 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1594 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1595
1596 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1597 intel_dp->DP |= DP_SYNC_HS_HIGH;
1598 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1599 intel_dp->DP |= DP_SYNC_VS_HIGH;
1600 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1601
6aba5b6c 1602 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1603 intel_dp->DP |= DP_ENHANCED_FRAMING;
1604
39e5fa88 1605 if (IS_CHERRYVIEW(dev))
44f37d1f 1606 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1607 else if (crtc->pipe == PIPE_B)
1608 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1609 }
a4fc5ed6
KP
1610}
1611
ffd6749d
PZ
1612#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1613#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1614
1a5ef5b7
PZ
1615#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1616#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1617
ffd6749d
PZ
1618#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1619#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1620
4be73780 1621static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1622 u32 mask,
1623 u32 value)
bd943159 1624{
30add22d 1625 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1626 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1627 u32 pp_stat_reg, pp_ctrl_reg;
1628
e39b999a
VS
1629 lockdep_assert_held(&dev_priv->pps_mutex);
1630
bf13e81b
JN
1631 pp_stat_reg = _pp_stat_reg(intel_dp);
1632 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1633
99ea7127 1634 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1635 mask, value,
1636 I915_READ(pp_stat_reg),
1637 I915_READ(pp_ctrl_reg));
32ce697c 1638
453c5420 1639 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1640 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1641 I915_READ(pp_stat_reg),
1642 I915_READ(pp_ctrl_reg));
32ce697c 1643 }
54c136d4
CW
1644
1645 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1646}
32ce697c 1647
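/*
 * For reference, the mask/value pairs above decompose as follows:
 * wait_panel_on() below is wait_panel_status(intel_dp, IDLE_ON_MASK,
 * IDLE_ON_VALUE), i.e. it polls PP_STATUS until the panel reports on
 * (PP_ON), no power-up/power-down sequence is in progress
 * (PP_SEQUENCE_NONE) and the sequencer state machine sits idle in its
 * "on" state (PP_SEQUENCE_STATE_ON_IDLE), giving up after the 5 second
 * timeout in wait_panel_status().
 */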
4be73780 1648static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1649{
1650 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1651 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1652}
1653
4be73780 1654static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1655{
1656 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1657 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1658}
1659
4be73780 1660static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1661{
1662 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1663
 1664 /* When the VDD override bit is the last thing we disable, we have to
 1665 * do the panel power cycle wait manually. */
1666 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1667 intel_dp->panel_power_cycle_delay);
1668
4be73780 1669 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1670}
1671
4be73780 1672static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1673{
1674 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1675 intel_dp->backlight_on_delay);
1676}
1677
4be73780 1678static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1679{
1680 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1681 intel_dp->backlight_off_delay);
1682}
99ea7127 1683
832dd3c1
KP
1684/* Read the current pp_control value, unlocking the register if it
1685 * is locked
1686 */
1687
453c5420 1688static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1689{
453c5420
JB
1690 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1691 struct drm_i915_private *dev_priv = dev->dev_private;
1692 u32 control;
832dd3c1 1693
e39b999a
VS
1694 lockdep_assert_held(&dev_priv->pps_mutex);
1695
bf13e81b 1696 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1697 control &= ~PANEL_UNLOCK_MASK;
1698 control |= PANEL_UNLOCK_REGS;
1699 return control;
bd943159
KP
1700}
1701
951468f3
VS
1702/*
1703 * Must be paired with edp_panel_vdd_off().
1704 * Must hold pps_mutex around the whole on/off sequence.
1705 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1706 */
1e0560e0 1707static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1708{
30add22d 1709 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1710 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1711 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1712 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1713 enum intel_display_power_domain power_domain;
5d613501 1714 u32 pp;
453c5420 1715 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1716 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1717
e39b999a
VS
1718 lockdep_assert_held(&dev_priv->pps_mutex);
1719
97af61f5 1720 if (!is_edp(intel_dp))
adddaaf4 1721 return false;
bd943159 1722
2c623c11 1723 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1724 intel_dp->want_panel_vdd = true;
99ea7127 1725
4be73780 1726 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1727 return need_to_disable;
b0665d57 1728
4e6e1a54
ID
1729 power_domain = intel_display_port_power_domain(intel_encoder);
1730 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1731
3936fcf4
VS
1732 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1733 port_name(intel_dig_port->port));
bd943159 1734
4be73780
DV
1735 if (!edp_have_panel_power(intel_dp))
1736 wait_panel_power_cycle(intel_dp);
99ea7127 1737
453c5420 1738 pp = ironlake_get_pp_control(intel_dp);
5d613501 1739 pp |= EDP_FORCE_VDD;
ebf33b18 1740
bf13e81b
JN
1741 pp_stat_reg = _pp_stat_reg(intel_dp);
1742 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1743
1744 I915_WRITE(pp_ctrl_reg, pp);
1745 POSTING_READ(pp_ctrl_reg);
1746 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1747 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1748 /*
1749 * If the panel wasn't on, delay before accessing aux channel
1750 */
4be73780 1751 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1752 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1753 port_name(intel_dig_port->port));
f01eca2e 1754 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1755 }
adddaaf4
JN
1756
1757 return need_to_disable;
1758}
1759
951468f3
VS
1760/*
1761 * Must be paired with intel_edp_panel_vdd_off() or
1762 * intel_edp_panel_off().
1763 * Nested calls to these functions are not allowed since
1764 * we drop the lock. Caller must use some higher level
1765 * locking to prevent nested calls from other threads.
1766 */
b80d6c78 1767void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1768{
c695b6b6 1769 bool vdd;
adddaaf4 1770
c695b6b6
VS
1771 if (!is_edp(intel_dp))
1772 return;
1773
773538e8 1774 pps_lock(intel_dp);
c695b6b6 1775 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1776 pps_unlock(intel_dp);
c695b6b6 1777
e2c719b7 1778 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1779 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1780}
1781
4be73780 1782static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1783{
30add22d 1784 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1785 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1786 struct intel_digital_port *intel_dig_port =
1787 dp_to_dig_port(intel_dp);
1788 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1789 enum intel_display_power_domain power_domain;
5d613501 1790 u32 pp;
453c5420 1791 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1792
e39b999a 1793 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1794
15e899a0 1795 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1796
15e899a0 1797 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1798 return;
b0665d57 1799
3936fcf4
VS
1800 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1801 port_name(intel_dig_port->port));
bd943159 1802
be2c9196
VS
1803 pp = ironlake_get_pp_control(intel_dp);
1804 pp &= ~EDP_FORCE_VDD;
453c5420 1805
be2c9196
VS
1806 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1807 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1808
be2c9196
VS
1809 I915_WRITE(pp_ctrl_reg, pp);
1810 POSTING_READ(pp_ctrl_reg);
90791a5c 1811
be2c9196
VS
1812 /* Make sure sequencer is idle before allowing subsequent activity */
1813 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1814 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1815
be2c9196
VS
1816 if ((pp & POWER_TARGET_ON) == 0)
1817 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1818
be2c9196
VS
1819 power_domain = intel_display_port_power_domain(intel_encoder);
1820 intel_display_power_put(dev_priv, power_domain);
bd943159 1821}
5d613501 1822
4be73780 1823static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1824{
1825 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1826 struct intel_dp, panel_vdd_work);
bd943159 1827
773538e8 1828 pps_lock(intel_dp);
15e899a0
VS
1829 if (!intel_dp->want_panel_vdd)
1830 edp_panel_vdd_off_sync(intel_dp);
773538e8 1831 pps_unlock(intel_dp);
bd943159
KP
1832}
1833
aba86890
ID
1834static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1835{
1836 unsigned long delay;
1837
1838 /*
1839 * Queue the timer to fire a long time from now (relative to the power
1840 * down delay) to keep the panel power up across a sequence of
1841 * operations.
1842 */
1843 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1844 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1845}
1846
951468f3
VS
1847/*
1848 * Must be paired with edp_panel_vdd_on().
1849 * Must hold pps_mutex around the whole on/off sequence.
1850 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1851 */
4be73780 1852static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1853{
e39b999a
VS
1854 struct drm_i915_private *dev_priv =
1855 intel_dp_to_dev(intel_dp)->dev_private;
1856
1857 lockdep_assert_held(&dev_priv->pps_mutex);
1858
97af61f5
KP
1859 if (!is_edp(intel_dp))
1860 return;
5d613501 1861
e2c719b7 1862 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1863 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1864
bd943159
KP
1865 intel_dp->want_panel_vdd = false;
1866
aba86890 1867 if (sync)
4be73780 1868 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1869 else
1870 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1871}
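
/*
 * A minimal usage sketch, illustrative only (the function name and the
 * "work" placeholder are hypothetical, not part of the driver): the
 * low-level VDD helpers above must be strictly paired and called with
 * pps_mutex held, mirroring the pairing intel_enable_dp() uses further
 * down in this file.
 */
static void __maybe_unused example_edp_vdd_usage(struct intel_dp *intel_dp)
{
        pps_lock(intel_dp);

        /* Force VDD on (if it is already up this just keeps it from
         * being turned off behind our back). */
        edp_panel_vdd_on(intel_dp);

        /* ... do the work that needs VDD, e.g. AUX/DPCD access ... */

        /* Drop the VDD request again; "false" defers the actual turn-off
         * to the delayed panel_vdd_work instead of doing it synchronously. */
        edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);
}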
1872
9f0fb5be 1873static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1874{
30add22d 1875 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1876 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1877 u32 pp;
453c5420 1878 u32 pp_ctrl_reg;
9934c132 1879
9f0fb5be
VS
1880 lockdep_assert_held(&dev_priv->pps_mutex);
1881
97af61f5 1882 if (!is_edp(intel_dp))
bd943159 1883 return;
99ea7127 1884
3936fcf4
VS
1885 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1886 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1887
e7a89ace
VS
1888 if (WARN(edp_have_panel_power(intel_dp),
1889 "eDP port %c panel power already on\n",
1890 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1891 return;
9934c132 1892
4be73780 1893 wait_panel_power_cycle(intel_dp);
37c6c9b0 1894
bf13e81b 1895 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1896 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1897 if (IS_GEN5(dev)) {
1898 /* ILK workaround: disable reset around power sequence */
1899 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1900 I915_WRITE(pp_ctrl_reg, pp);
1901 POSTING_READ(pp_ctrl_reg);
05ce1a49 1902 }
37c6c9b0 1903
1c0ae80a 1904 pp |= POWER_TARGET_ON;
99ea7127
KP
1905 if (!IS_GEN5(dev))
1906 pp |= PANEL_POWER_RESET;
1907
453c5420
JB
1908 I915_WRITE(pp_ctrl_reg, pp);
1909 POSTING_READ(pp_ctrl_reg);
9934c132 1910
4be73780 1911 wait_panel_on(intel_dp);
dce56b3c 1912 intel_dp->last_power_on = jiffies;
9934c132 1913
05ce1a49
KP
1914 if (IS_GEN5(dev)) {
1915 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1916 I915_WRITE(pp_ctrl_reg, pp);
1917 POSTING_READ(pp_ctrl_reg);
05ce1a49 1918 }
9f0fb5be 1919}
e39b999a 1920
9f0fb5be
VS
1921void intel_edp_panel_on(struct intel_dp *intel_dp)
1922{
1923 if (!is_edp(intel_dp))
1924 return;
1925
1926 pps_lock(intel_dp);
1927 edp_panel_on(intel_dp);
773538e8 1928 pps_unlock(intel_dp);
9934c132
JB
1929}
1930
9f0fb5be
VS
1931
1932static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1933{
4e6e1a54
ID
1934 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1935 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1936 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1937 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1938 enum intel_display_power_domain power_domain;
99ea7127 1939 u32 pp;
453c5420 1940 u32 pp_ctrl_reg;
9934c132 1941
9f0fb5be
VS
1942 lockdep_assert_held(&dev_priv->pps_mutex);
1943
97af61f5
KP
1944 if (!is_edp(intel_dp))
1945 return;
37c6c9b0 1946
3936fcf4
VS
1947 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1948 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1949
3936fcf4
VS
1950 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1951 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1952
453c5420 1953 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1954 /* We need to switch off panel power _and_ force vdd, for otherwise some
1955 * panels get very unhappy and cease to work. */
b3064154
PJ
1956 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1957 EDP_BLC_ENABLE);
453c5420 1958
bf13e81b 1959 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1960
849e39f5
PZ
1961 intel_dp->want_panel_vdd = false;
1962
453c5420
JB
1963 I915_WRITE(pp_ctrl_reg, pp);
1964 POSTING_READ(pp_ctrl_reg);
9934c132 1965
dce56b3c 1966 intel_dp->last_power_cycle = jiffies;
4be73780 1967 wait_panel_off(intel_dp);
849e39f5
PZ
1968
1969 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1970 power_domain = intel_display_port_power_domain(intel_encoder);
1971 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1972}
e39b999a 1973
9f0fb5be
VS
1974void intel_edp_panel_off(struct intel_dp *intel_dp)
1975{
1976 if (!is_edp(intel_dp))
1977 return;
e39b999a 1978
9f0fb5be
VS
1979 pps_lock(intel_dp);
1980 edp_panel_off(intel_dp);
773538e8 1981 pps_unlock(intel_dp);
9934c132
JB
1982}
1983
1250d107
JN
1984/* Enable backlight in the panel power control. */
1985static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1986{
da63a9f2
PZ
1987 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1988 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1989 struct drm_i915_private *dev_priv = dev->dev_private;
1990 u32 pp;
453c5420 1991 u32 pp_ctrl_reg;
32f9d658 1992
01cb9ea6
JB
1993 /*
1994 * If we enable the backlight right away following a panel power
1995 * on, we may see slight flicker as the panel syncs with the eDP
1996 * link. So delay a bit to make sure the image is solid before
1997 * allowing it to appear.
1998 */
4be73780 1999 wait_backlight_on(intel_dp);
e39b999a 2000
773538e8 2001 pps_lock(intel_dp);
e39b999a 2002
453c5420 2003 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2004 pp |= EDP_BLC_ENABLE;
453c5420 2005
bf13e81b 2006 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2007
2008 I915_WRITE(pp_ctrl_reg, pp);
2009 POSTING_READ(pp_ctrl_reg);
e39b999a 2010
773538e8 2011 pps_unlock(intel_dp);
32f9d658
ZW
2012}
2013
1250d107
JN
2014/* Enable backlight PWM and backlight PP control. */
2015void intel_edp_backlight_on(struct intel_dp *intel_dp)
2016{
2017 if (!is_edp(intel_dp))
2018 return;
2019
2020 DRM_DEBUG_KMS("\n");
2021
2022 intel_panel_enable_backlight(intel_dp->attached_connector);
2023 _intel_edp_backlight_on(intel_dp);
2024}
2025
2026/* Disable backlight in the panel power control. */
2027static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2028{
30add22d 2029 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2030 struct drm_i915_private *dev_priv = dev->dev_private;
2031 u32 pp;
453c5420 2032 u32 pp_ctrl_reg;
32f9d658 2033
f01eca2e
KP
2034 if (!is_edp(intel_dp))
2035 return;
2036
773538e8 2037 pps_lock(intel_dp);
e39b999a 2038
453c5420 2039 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2040 pp &= ~EDP_BLC_ENABLE;
453c5420 2041
bf13e81b 2042 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2043
2044 I915_WRITE(pp_ctrl_reg, pp);
2045 POSTING_READ(pp_ctrl_reg);
f7d2323c 2046
773538e8 2047 pps_unlock(intel_dp);
e39b999a
VS
2048
2049 intel_dp->last_backlight_off = jiffies;
f7d2323c 2050 edp_wait_backlight_off(intel_dp);
1250d107 2051}
f7d2323c 2052
1250d107
JN
2053/* Disable backlight PP control and backlight PWM. */
2054void intel_edp_backlight_off(struct intel_dp *intel_dp)
2055{
2056 if (!is_edp(intel_dp))
2057 return;
2058
2059 DRM_DEBUG_KMS("\n");
f7d2323c 2060
1250d107 2061 _intel_edp_backlight_off(intel_dp);
f7d2323c 2062 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2063}
a4fc5ed6 2064
73580fb7
JN
2065/*
2066 * Hook for controlling the panel power control backlight through the bl_power
2067 * sysfs attribute. Take care to handle multiple calls.
2068 */
2069static void intel_edp_backlight_power(struct intel_connector *connector,
2070 bool enable)
2071{
2072 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2073 bool is_enabled;
2074
773538e8 2075 pps_lock(intel_dp);
e39b999a 2076 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2077 pps_unlock(intel_dp);
73580fb7
JN
2078
2079 if (is_enabled == enable)
2080 return;
2081
23ba9373
JN
2082 DRM_DEBUG_KMS("panel power control backlight %s\n",
2083 enable ? "enable" : "disable");
73580fb7
JN
2084
2085 if (enable)
2086 _intel_edp_backlight_on(intel_dp);
2087 else
2088 _intel_edp_backlight_off(intel_dp);
2089}
2090
2bd2ad64 2091static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2092{
da63a9f2
PZ
2093 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2094 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2095 struct drm_device *dev = crtc->dev;
d240f20f
JB
2096 struct drm_i915_private *dev_priv = dev->dev_private;
2097 u32 dpa_ctl;
2098
2bd2ad64
DV
2099 assert_pipe_disabled(dev_priv,
2100 to_intel_crtc(crtc)->pipe);
2101
d240f20f
JB
2102 DRM_DEBUG_KMS("\n");
2103 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2104 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2105 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2106
2107 /* We don't adjust intel_dp->DP while tearing down the link, to
2108 * facilitate link retraining (e.g. after hotplug). Hence clear all
2109 * enable bits here to ensure that we don't enable too much. */
2110 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2111 intel_dp->DP |= DP_PLL_ENABLE;
2112 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2113 POSTING_READ(DP_A);
2114 udelay(200);
d240f20f
JB
2115}
2116
2bd2ad64 2117static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2118{
da63a9f2
PZ
2119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2120 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2121 struct drm_device *dev = crtc->dev;
d240f20f
JB
2122 struct drm_i915_private *dev_priv = dev->dev_private;
2123 u32 dpa_ctl;
2124
2bd2ad64
DV
2125 assert_pipe_disabled(dev_priv,
2126 to_intel_crtc(crtc)->pipe);
2127
d240f20f 2128 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2129 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2130 "dp pll off, should be on\n");
2131 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2132
2133 /* We can't rely on the value tracked for the DP register in
2134 * intel_dp->DP because link_down must not change that (otherwise link
 2135 * re-training will fail). */
298b0b39 2136 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2137 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2138 POSTING_READ(DP_A);
d240f20f
JB
2139 udelay(200);
2140}
2141
c7ad3810 2142/* If the sink supports it, try to set the power state appropriately */
c19b0669 2143void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2144{
2145 int ret, i;
2146
2147 /* Should have a valid DPCD by this point */
2148 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2149 return;
2150
2151 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2152 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2153 DP_SET_POWER_D3);
c7ad3810
JB
2154 } else {
2155 /*
2156 * When turning on, we need to retry for 1ms to give the sink
2157 * time to wake up.
2158 */
2159 for (i = 0; i < 3; i++) {
9d1a1031
JN
2160 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2161 DP_SET_POWER_D0);
c7ad3810
JB
2162 if (ret == 1)
2163 break;
2164 msleep(1);
2165 }
2166 }
f9cac721
JN
2167
2168 if (ret != 1)
2169 DRM_DEBUG_KMS("failed to %s sink power state\n",
2170 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2171}
2172
19d8fe15
DV
2173static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2174 enum pipe *pipe)
d240f20f 2175{
19d8fe15 2176 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2177 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2178 struct drm_device *dev = encoder->base.dev;
2179 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2180 enum intel_display_power_domain power_domain;
2181 u32 tmp;
2182
2183 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2184 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2185 return false;
2186
2187 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2188
2189 if (!(tmp & DP_PORT_EN))
2190 return false;
2191
39e5fa88 2192 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2193 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2194 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2195 enum pipe p;
19d8fe15 2196
adc289d7
VS
2197 for_each_pipe(dev_priv, p) {
2198 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2199 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2200 *pipe = p;
19d8fe15
DV
2201 return true;
2202 }
2203 }
19d8fe15 2204
4a0833ec
DV
2205 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2206 intel_dp->output_reg);
39e5fa88
VS
2207 } else if (IS_CHERRYVIEW(dev)) {
2208 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2209 } else {
2210 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2211 }
d240f20f 2212
19d8fe15
DV
2213 return true;
2214}
d240f20f 2215
045ac3b5 2216static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2217 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2218{
2219 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2220 u32 tmp, flags = 0;
63000ef6
XZ
2221 struct drm_device *dev = encoder->base.dev;
2222 struct drm_i915_private *dev_priv = dev->dev_private;
2223 enum port port = dp_to_dig_port(intel_dp)->port;
2224 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2225 int dotclock;
045ac3b5 2226
9ed109a7 2227 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2228
2229 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2230
39e5fa88
VS
2231 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2232 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2233 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2234 flags |= DRM_MODE_FLAG_PHSYNC;
2235 else
2236 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2237
39e5fa88 2238 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2239 flags |= DRM_MODE_FLAG_PVSYNC;
2240 else
2241 flags |= DRM_MODE_FLAG_NVSYNC;
2242 } else {
39e5fa88 2243 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2244 flags |= DRM_MODE_FLAG_PHSYNC;
2245 else
2246 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2247
39e5fa88 2248 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2249 flags |= DRM_MODE_FLAG_PVSYNC;
2250 else
2251 flags |= DRM_MODE_FLAG_NVSYNC;
2252 }
045ac3b5 2253
2d112de7 2254 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2255
8c875fca
VS
2256 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2257 tmp & DP_COLOR_RANGE_16_235)
2258 pipe_config->limited_color_range = true;
2259
eb14cb74
VS
2260 pipe_config->has_dp_encoder = true;
2261
2262 intel_dp_get_m_n(crtc, pipe_config);
2263
18442d08 2264 if (port == PORT_A) {
f1f644dc
JB
2265 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2266 pipe_config->port_clock = 162000;
2267 else
2268 pipe_config->port_clock = 270000;
2269 }
18442d08
VS
2270
2271 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2272 &pipe_config->dp_m_n);
2273
2274 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2275 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2276
2d112de7 2277 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2278
c6cd2ee2
JN
2279 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2280 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2281 /*
2282 * This is a big fat ugly hack.
2283 *
2284 * Some machines in UEFI boot mode provide us a VBT that has 18
2285 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2286 * unknown we fail to light up. Yet the same BIOS boots up with
2287 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2288 * max, not what it tells us to use.
2289 *
2290 * Note: This will still be broken if the eDP panel is not lit
2291 * up by the BIOS, and thus we can't get the mode at module
2292 * load.
2293 */
2294 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2295 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2296 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2297 }
045ac3b5
JB
2298}
2299
e8cb4558 2300static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2301{
e8cb4558 2302 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2303 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2304 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2305
6e3c9717 2306 if (crtc->config->has_audio)
495a5bb8 2307 intel_audio_codec_disable(encoder);
6cb49835 2308
b32c6f48
RV
2309 if (HAS_PSR(dev) && !HAS_DDI(dev))
2310 intel_psr_disable(intel_dp);
2311
6cb49835
DV
2312 /* Make sure the panel is off before trying to change the mode. But also
2313 * ensure that we have vdd while we switch off the panel. */
24f3e092 2314 intel_edp_panel_vdd_on(intel_dp);
4be73780 2315 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2316 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2317 intel_edp_panel_off(intel_dp);
3739850b 2318
08aff3fe
VS
2319 /* disable the port before the pipe on g4x */
2320 if (INTEL_INFO(dev)->gen < 5)
3739850b 2321 intel_dp_link_down(intel_dp);
d240f20f
JB
2322}
2323
08aff3fe 2324static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2325{
2bd2ad64 2326 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2327 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2328
49277c31 2329 intel_dp_link_down(intel_dp);
08aff3fe
VS
2330 if (port == PORT_A)
2331 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2332}
2333
2334static void vlv_post_disable_dp(struct intel_encoder *encoder)
2335{
2336 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2337
2338 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2339}
2340
580d3811
VS
2341static void chv_post_disable_dp(struct intel_encoder *encoder)
2342{
2343 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2344 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2345 struct drm_device *dev = encoder->base.dev;
2346 struct drm_i915_private *dev_priv = dev->dev_private;
2347 struct intel_crtc *intel_crtc =
2348 to_intel_crtc(encoder->base.crtc);
2349 enum dpio_channel ch = vlv_dport_to_channel(dport);
2350 enum pipe pipe = intel_crtc->pipe;
2351 u32 val;
2352
2353 intel_dp_link_down(intel_dp);
2354
a580516d 2355 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2356
2357 /* Propagate soft reset to data lane reset */
97fd4d5c 2358 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2359 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2360 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2361
97fd4d5c
VS
2362 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2363 val |= CHV_PCS_REQ_SOFTRESET_EN;
2364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2365
2366 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2367 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2368 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2369
2370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2371 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2372 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811 2373
a580516d 2374 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2375}
2376
7b13b58a
VS
2377static void
2378_intel_dp_set_link_train(struct intel_dp *intel_dp,
2379 uint32_t *DP,
2380 uint8_t dp_train_pat)
2381{
2382 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2383 struct drm_device *dev = intel_dig_port->base.base.dev;
2384 struct drm_i915_private *dev_priv = dev->dev_private;
2385 enum port port = intel_dig_port->port;
2386
2387 if (HAS_DDI(dev)) {
2388 uint32_t temp = I915_READ(DP_TP_CTL(port));
2389
2390 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2391 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2392 else
2393 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2394
2395 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2396 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2397 case DP_TRAINING_PATTERN_DISABLE:
2398 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2399
2400 break;
2401 case DP_TRAINING_PATTERN_1:
2402 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2403 break;
2404 case DP_TRAINING_PATTERN_2:
2405 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2406 break;
2407 case DP_TRAINING_PATTERN_3:
2408 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2409 break;
2410 }
2411 I915_WRITE(DP_TP_CTL(port), temp);
2412
39e5fa88
VS
2413 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2414 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2415 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2416
2417 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2418 case DP_TRAINING_PATTERN_DISABLE:
2419 *DP |= DP_LINK_TRAIN_OFF_CPT;
2420 break;
2421 case DP_TRAINING_PATTERN_1:
2422 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2423 break;
2424 case DP_TRAINING_PATTERN_2:
2425 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2426 break;
2427 case DP_TRAINING_PATTERN_3:
2428 DRM_ERROR("DP training pattern 3 not supported\n");
2429 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2430 break;
2431 }
2432
2433 } else {
2434 if (IS_CHERRYVIEW(dev))
2435 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2436 else
2437 *DP &= ~DP_LINK_TRAIN_MASK;
2438
2439 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2440 case DP_TRAINING_PATTERN_DISABLE:
2441 *DP |= DP_LINK_TRAIN_OFF;
2442 break;
2443 case DP_TRAINING_PATTERN_1:
2444 *DP |= DP_LINK_TRAIN_PAT_1;
2445 break;
2446 case DP_TRAINING_PATTERN_2:
2447 *DP |= DP_LINK_TRAIN_PAT_2;
2448 break;
2449 case DP_TRAINING_PATTERN_3:
2450 if (IS_CHERRYVIEW(dev)) {
2451 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2452 } else {
2453 DRM_ERROR("DP training pattern 3 not supported\n");
2454 *DP |= DP_LINK_TRAIN_PAT_2;
2455 }
2456 break;
2457 }
2458 }
2459}
2460
2461static void intel_dp_enable_port(struct intel_dp *intel_dp)
2462{
2463 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2464 struct drm_i915_private *dev_priv = dev->dev_private;
2465
7b13b58a
VS
2466 /* enable with pattern 1 (as per spec) */
2467 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2468 DP_TRAINING_PATTERN_1);
2469
2470 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2471 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2472
2473 /*
2474 * Magic for VLV/CHV. We _must_ first set up the register
2475 * without actually enabling the port, and then do another
2476 * write to enable the port. Otherwise link training will
2477 * fail when the power sequencer is freshly used for this port.
2478 */
2479 intel_dp->DP |= DP_PORT_EN;
2480
2481 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2482 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2483}
2484
e8cb4558 2485static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2486{
e8cb4558
DV
2487 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2488 struct drm_device *dev = encoder->base.dev;
2489 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2490 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2491 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2492 unsigned int lane_mask = 0x0;
5d613501 2493
0c33d8d7
DV
2494 if (WARN_ON(dp_reg & DP_PORT_EN))
2495 return;
5d613501 2496
093e3f13
VS
2497 pps_lock(intel_dp);
2498
2499 if (IS_VALLEYVIEW(dev))
2500 vlv_init_panel_power_sequencer(intel_dp);
2501
7b13b58a 2502 intel_dp_enable_port(intel_dp);
093e3f13
VS
2503
2504 edp_panel_vdd_on(intel_dp);
2505 edp_panel_on(intel_dp);
2506 edp_panel_vdd_off(intel_dp, true);
2507
2508 pps_unlock(intel_dp);
2509
61234fa5 2510 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2511 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2512 lane_mask);
61234fa5 2513
f01eca2e 2514 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2515 intel_dp_start_link_train(intel_dp);
33a34e4e 2516 intel_dp_complete_link_train(intel_dp);
3ab9c637 2517 intel_dp_stop_link_train(intel_dp);
c1dec79a 2518
6e3c9717 2519 if (crtc->config->has_audio) {
c1dec79a
JN
2520 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2521 pipe_name(crtc->pipe));
2522 intel_audio_codec_enable(encoder);
2523 }
ab1f90f9 2524}
89b667f8 2525
ecff4f3b
JN
2526static void g4x_enable_dp(struct intel_encoder *encoder)
2527{
828f5c6e
JN
2528 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2529
ecff4f3b 2530 intel_enable_dp(encoder);
4be73780 2531 intel_edp_backlight_on(intel_dp);
ab1f90f9 2532}
89b667f8 2533
ab1f90f9
JN
2534static void vlv_enable_dp(struct intel_encoder *encoder)
2535{
828f5c6e
JN
2536 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2537
4be73780 2538 intel_edp_backlight_on(intel_dp);
b32c6f48 2539 intel_psr_enable(intel_dp);
d240f20f
JB
2540}
2541
ecff4f3b 2542static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2543{
2544 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2545 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2546
8ac33ed3
DV
2547 intel_dp_prepare(encoder);
2548
d41f1efb
DV
2549 /* Only ilk+ has port A */
2550 if (dport->port == PORT_A) {
2551 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2552 ironlake_edp_pll_on(intel_dp);
d41f1efb 2553 }
ab1f90f9
JN
2554}
2555
83b84597
VS
2556static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2557{
2558 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2559 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2560 enum pipe pipe = intel_dp->pps_pipe;
2561 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2562
2563 edp_panel_vdd_off_sync(intel_dp);
2564
2565 /*
 2566 * VLV seems to get confused when multiple power sequencers
 2567 * have the same port selected (even if only one has power/vdd
 2568 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2569 * CHV on the other hand doesn't seem to mind having the same port
 2570 * selected in multiple power sequencers, but let's clear the
2571 * port select always when logically disconnecting a power sequencer
2572 * from a port.
2573 */
2574 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2575 pipe_name(pipe), port_name(intel_dig_port->port));
2576 I915_WRITE(pp_on_reg, 0);
2577 POSTING_READ(pp_on_reg);
2578
2579 intel_dp->pps_pipe = INVALID_PIPE;
2580}
2581
a4a5d2f8
VS
2582static void vlv_steal_power_sequencer(struct drm_device *dev,
2583 enum pipe pipe)
2584{
2585 struct drm_i915_private *dev_priv = dev->dev_private;
2586 struct intel_encoder *encoder;
2587
2588 lockdep_assert_held(&dev_priv->pps_mutex);
2589
ac3c12e4
VS
2590 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2591 return;
2592
a4a5d2f8
VS
2593 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2594 base.head) {
2595 struct intel_dp *intel_dp;
773538e8 2596 enum port port;
a4a5d2f8
VS
2597
2598 if (encoder->type != INTEL_OUTPUT_EDP)
2599 continue;
2600
2601 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2602 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2603
2604 if (intel_dp->pps_pipe != pipe)
2605 continue;
2606
2607 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2608 pipe_name(pipe), port_name(port));
a4a5d2f8 2609
034e43c6
VS
2610 WARN(encoder->connectors_active,
2611 "stealing pipe %c power sequencer from active eDP port %c\n",
2612 pipe_name(pipe), port_name(port));
a4a5d2f8 2613
a4a5d2f8 2614 /* make sure vdd is off before we steal it */
83b84597 2615 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2616 }
2617}
2618
2619static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2620{
2621 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2622 struct intel_encoder *encoder = &intel_dig_port->base;
2623 struct drm_device *dev = encoder->base.dev;
2624 struct drm_i915_private *dev_priv = dev->dev_private;
2625 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2626
2627 lockdep_assert_held(&dev_priv->pps_mutex);
2628
093e3f13
VS
2629 if (!is_edp(intel_dp))
2630 return;
2631
a4a5d2f8
VS
2632 if (intel_dp->pps_pipe == crtc->pipe)
2633 return;
2634
2635 /*
2636 * If another power sequencer was being used on this
 2637 * port previously, make sure to turn off vdd there while
2638 * we still have control of it.
2639 */
2640 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2641 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2642
2643 /*
2644 * We may be stealing the power
2645 * sequencer from another port.
2646 */
2647 vlv_steal_power_sequencer(dev, crtc->pipe);
2648
2649 /* now it's all ours */
2650 intel_dp->pps_pipe = crtc->pipe;
2651
2652 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2653 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2654
2655 /* init power sequencer on this pipe and port */
36b5f425
VS
2656 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2657 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2658}
2659
ab1f90f9 2660static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2661{
2bd2ad64 2662 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2663 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2664 struct drm_device *dev = encoder->base.dev;
89b667f8 2665 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2666 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2667 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2668 int pipe = intel_crtc->pipe;
2669 u32 val;
a4fc5ed6 2670
a580516d 2671 mutex_lock(&dev_priv->sb_lock);
89b667f8 2672
ab3c759a 2673 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2674 val = 0;
2675 if (pipe)
2676 val |= (1<<21);
2677 else
2678 val &= ~(1<<21);
2679 val |= 0x001000c4;
ab3c759a
CML
2680 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2681 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2682 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2683
a580516d 2684 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2685
2686 intel_enable_dp(encoder);
89b667f8
JB
2687}
2688
ecff4f3b 2689static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2690{
2691 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2692 struct drm_device *dev = encoder->base.dev;
2693 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2694 struct intel_crtc *intel_crtc =
2695 to_intel_crtc(encoder->base.crtc);
e4607fcf 2696 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2697 int pipe = intel_crtc->pipe;
89b667f8 2698
8ac33ed3
DV
2699 intel_dp_prepare(encoder);
2700
89b667f8 2701 /* Program Tx lane resets to default */
a580516d 2702 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2703 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2704 DPIO_PCS_TX_LANE2_RESET |
2705 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2706 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2707 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2708 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2709 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2710 DPIO_PCS_CLK_SOFT_RESET);
2711
2712 /* Fix up inter-pair skew failure */
ab3c759a
CML
2713 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2714 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2715 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2716 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2717}
2718
e4a1d846
CML
2719static void chv_pre_enable_dp(struct intel_encoder *encoder)
2720{
2721 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2722 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2723 struct drm_device *dev = encoder->base.dev;
2724 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2725 struct intel_crtc *intel_crtc =
2726 to_intel_crtc(encoder->base.crtc);
2727 enum dpio_channel ch = vlv_dport_to_channel(dport);
2728 int pipe = intel_crtc->pipe;
2e523e98 2729 int data, i, stagger;
949c1d43 2730 u32 val;
e4a1d846 2731
a580516d 2732 mutex_lock(&dev_priv->sb_lock);
949c1d43 2733
570e2a74
VS
2734 /* allow hardware to manage TX FIFO reset source */
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2736 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2737 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2738
2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2740 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2742
949c1d43 2743 /* Deassert soft data lane reset */
97fd4d5c 2744 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2745 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2746 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2747
2748 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2749 val |= CHV_PCS_REQ_SOFTRESET_EN;
2750 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2751
2752 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2753 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2755
97fd4d5c 2756 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2757 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2758 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2759
 2760 /* Program Tx lane latency optimal setting */
e4a1d846 2761 for (i = 0; i < 4; i++) {
e4a1d846
CML
2762 /* Set the upar bit */
2763 data = (i == 1) ? 0x0 : 0x1;
2764 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2765 data << DPIO_UPAR_SHIFT);
2766 }
2767
2768 /* Data lane stagger programming */
2e523e98
VS
2769 if (intel_crtc->config->port_clock > 270000)
2770 stagger = 0x18;
2771 else if (intel_crtc->config->port_clock > 135000)
2772 stagger = 0xd;
2773 else if (intel_crtc->config->port_clock > 67500)
2774 stagger = 0x7;
2775 else if (intel_crtc->config->port_clock > 33750)
2776 stagger = 0x4;
2777 else
2778 stagger = 0x2;
2779
2780 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2781 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2782 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2783
2784 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2785 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2786 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
e4a1d846 2787
2e523e98
VS
2788 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2789 DPIO_LANESTAGGER_STRAP(stagger) |
2790 DPIO_LANESTAGGER_STRAP_OVRD |
2791 DPIO_TX1_STAGGER_MASK(0x1f) |
2792 DPIO_TX1_STAGGER_MULT(6) |
2793 DPIO_TX2_STAGGER_MULT(0));
2794
2795 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2796 DPIO_LANESTAGGER_STRAP(stagger) |
2797 DPIO_LANESTAGGER_STRAP_OVRD |
2798 DPIO_TX1_STAGGER_MASK(0x1f) |
2799 DPIO_TX1_STAGGER_MULT(7) |
2800 DPIO_TX2_STAGGER_MULT(5));
e4a1d846 2801
a580516d 2802 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2803
e4a1d846 2804 intel_enable_dp(encoder);
e4a1d846
CML
2805}
2806
9197c88b
VS
2807static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2808{
2809 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2810 struct drm_device *dev = encoder->base.dev;
2811 struct drm_i915_private *dev_priv = dev->dev_private;
2812 struct intel_crtc *intel_crtc =
2813 to_intel_crtc(encoder->base.crtc);
2814 enum dpio_channel ch = vlv_dport_to_channel(dport);
2815 enum pipe pipe = intel_crtc->pipe;
2816 u32 val;
2817
625695f8
VS
2818 intel_dp_prepare(encoder);
2819
a580516d 2820 mutex_lock(&dev_priv->sb_lock);
9197c88b 2821
b9e5ac3c
VS
2822 /* program left/right clock distribution */
2823 if (pipe != PIPE_B) {
2824 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2825 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2826 if (ch == DPIO_CH0)
2827 val |= CHV_BUFLEFTENA1_FORCE;
2828 if (ch == DPIO_CH1)
2829 val |= CHV_BUFRIGHTENA1_FORCE;
2830 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2831 } else {
2832 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2833 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2834 if (ch == DPIO_CH0)
2835 val |= CHV_BUFLEFTENA2_FORCE;
2836 if (ch == DPIO_CH1)
2837 val |= CHV_BUFRIGHTENA2_FORCE;
2838 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2839 }
2840
9197c88b
VS
2841 /* program clock channel usage */
2842 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2843 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2844 if (pipe != PIPE_B)
2845 val &= ~CHV_PCS_USEDCLKCHANNEL;
2846 else
2847 val |= CHV_PCS_USEDCLKCHANNEL;
2848 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2849
2850 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2851 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2852 if (pipe != PIPE_B)
2853 val &= ~CHV_PCS_USEDCLKCHANNEL;
2854 else
2855 val |= CHV_PCS_USEDCLKCHANNEL;
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2857
2858 /*
 2859 * This is a bit weird since generally CL
2860 * matches the pipe, but here we need to
2861 * pick the CL based on the port.
2862 */
2863 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2864 if (pipe != PIPE_B)
2865 val &= ~CHV_CMN_USEDCLKCHANNEL;
2866 else
2867 val |= CHV_CMN_USEDCLKCHANNEL;
2868 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2869
a580516d 2870 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2871}
2872
a4fc5ed6 2873/*
df0c237d
JB
2874 * Native read with retry for link status and receiver capability reads for
2875 * cases where the sink may still be asleep.
9d1a1031
JN
2876 *
2877 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2878 * supposed to retry 3 times per the spec.
a4fc5ed6 2879 */
9d1a1031
JN
2880static ssize_t
2881intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2882 void *buffer, size_t size)
a4fc5ed6 2883{
9d1a1031
JN
2884 ssize_t ret;
2885 int i;
61da5fab 2886
f6a19066
VS
2887 /*
 2888 * Sometimes we just get the same incorrect byte repeated
 2889 * over the entire buffer. Doing just one throw-away read
2890 * initially seems to "solve" it.
2891 */
2892 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2893
61da5fab 2894 for (i = 0; i < 3; i++) {
9d1a1031
JN
2895 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2896 if (ret == size)
2897 return ret;
61da5fab
JB
2898 msleep(1);
2899 }
a4fc5ed6 2900
9d1a1031 2901 return ret;
a4fc5ed6
KP
2902}
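
/*
 * A minimal sketch of the "receiver capability" reads mentioned above,
 * illustrative only (the function name is hypothetical; cf. the driver's
 * own intel_dp_get_dpcd() later in this file): the same wake-retrying
 * helper pulls the first sizeof(intel_dp->dpcd) bytes of the DPCD,
 * starting at DP_DPCD_REV, into the cached copy.
 */
static bool __maybe_unused example_read_receiver_caps(struct intel_dp *intel_dp)
{
        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
                                    intel_dp->dpcd,
                                    sizeof(intel_dp->dpcd)) < 0)
                return false; /* aux transfer failed */

        return true;
}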
2903
2904/*
2905 * Fetch AUX CH registers 0x202 - 0x207 which contain
2906 * link status information
2907 */
2908static bool
93f62dad 2909intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2910{
9d1a1031
JN
2911 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2912 DP_LANE0_1_STATUS,
2913 link_status,
2914 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2915}
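
/*
 * A minimal sketch, illustrative only (the function name is hypothetical),
 * of how the raw link status fetched above is typically interpreted using
 * the generic drm_dp_helper predicates that the link training code relies
 * on; lane_count is taken from intel_dp->lane_count.
 */
static bool __maybe_unused example_link_status_ok(struct intel_dp *intel_dp)
{
        uint8_t link_status[DP_LINK_STATUS_SIZE];

        if (!intel_dp_get_link_status(intel_dp, link_status))
                return false;

        /* Both clock recovery and channel equalization must still hold. */
        return drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count) &&
               drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}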
2916
1100244e 2917/* These are source-specific values. */
a4fc5ed6 2918static uint8_t
1a2eb460 2919intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2920{
30add22d 2921 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2922 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2923 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2924
9314726b
VK
2925 if (IS_BROXTON(dev))
2926 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2927 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2928 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2929 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2930 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2931 } else if (IS_VALLEYVIEW(dev))
bd60018a 2932 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2933 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2934 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2935 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2936 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2937 else
bd60018a 2938 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2939}
2940
2941static uint8_t
2942intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2943{
30add22d 2944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2945 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2946
5a9d1f1a
DL
2947 if (INTEL_INFO(dev)->gen >= 9) {
2948 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2949 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2950 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2953 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2956 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2957 default:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2959 }
2960 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2961 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2963 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2964 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2965 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2967 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2968 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2969 default:
bd60018a 2970 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2971 }
e2fa6fba
P
2972 } else if (IS_VALLEYVIEW(dev)) {
2973 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2974 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2975 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2977 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2979 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2981 default:
bd60018a 2982 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2983 }
bc7d38a4 2984 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2985 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2986 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2987 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2990 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2991 default:
bd60018a 2992 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2993 }
2994 } else {
2995 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2997 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2998 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2999 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3000 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3001 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3003 default:
bd60018a 3004 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3005 }
a4fc5ed6
KP
3006 }
3007}
3008
5829975c 3009static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3010{
3011 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3012 struct drm_i915_private *dev_priv = dev->dev_private;
3013 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3014 struct intel_crtc *intel_crtc =
3015 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3016 unsigned long demph_reg_value, preemph_reg_value,
3017 uniqtranscale_reg_value;
3018 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3019 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3020 int pipe = intel_crtc->pipe;
e2fa6fba
P
3021
3022 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3023 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3024 preemph_reg_value = 0x0004000;
3025 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3027 demph_reg_value = 0x2B405555;
3028 uniqtranscale_reg_value = 0x552AB83A;
3029 break;
bd60018a 3030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3031 demph_reg_value = 0x2B404040;
3032 uniqtranscale_reg_value = 0x5548B83A;
3033 break;
bd60018a 3034 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3035 demph_reg_value = 0x2B245555;
3036 uniqtranscale_reg_value = 0x5560B83A;
3037 break;
bd60018a 3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3039 demph_reg_value = 0x2B405555;
3040 uniqtranscale_reg_value = 0x5598DA3A;
3041 break;
3042 default:
3043 return 0;
3044 }
3045 break;
bd60018a 3046 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3047 preemph_reg_value = 0x0002000;
3048 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3050 demph_reg_value = 0x2B404040;
3051 uniqtranscale_reg_value = 0x5552B83A;
3052 break;
bd60018a 3053 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3054 demph_reg_value = 0x2B404848;
3055 uniqtranscale_reg_value = 0x5580B83A;
3056 break;
bd60018a 3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3058 demph_reg_value = 0x2B404040;
3059 uniqtranscale_reg_value = 0x55ADDA3A;
3060 break;
3061 default:
3062 return 0;
3063 }
3064 break;
bd60018a 3065 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3066 preemph_reg_value = 0x0000000;
3067 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3069 demph_reg_value = 0x2B305555;
3070 uniqtranscale_reg_value = 0x5570B83A;
3071 break;
bd60018a 3072 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3073 demph_reg_value = 0x2B2B4040;
3074 uniqtranscale_reg_value = 0x55ADDA3A;
3075 break;
3076 default:
3077 return 0;
3078 }
3079 break;
bd60018a 3080 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3081 preemph_reg_value = 0x0006000;
3082 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3084 demph_reg_value = 0x1B405555;
3085 uniqtranscale_reg_value = 0x55ADDA3A;
3086 break;
3087 default:
3088 return 0;
3089 }
3090 break;
3091 default:
3092 return 0;
3093 }
3094
a580516d 3095 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3096 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3097 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3098 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3099 uniqtranscale_reg_value);
ab3c759a
CML
3100 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3101 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3102 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3103 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3104 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3105
3106 return 0;
3107}
3108
5829975c 3109static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3110{
3111 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3112 struct drm_i915_private *dev_priv = dev->dev_private;
3113 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3114 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3115 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3116 uint8_t train_set = intel_dp->train_set[0];
3117 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3118 enum pipe pipe = intel_crtc->pipe;
3119 int i;
e4a1d846
CML
3120
3121 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3122 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3123 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3125 deemph_reg_value = 128;
3126 margin_reg_value = 52;
3127 break;
bd60018a 3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3129 deemph_reg_value = 128;
3130 margin_reg_value = 77;
3131 break;
bd60018a 3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3133 deemph_reg_value = 128;
3134 margin_reg_value = 102;
3135 break;
bd60018a 3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3137 deemph_reg_value = 128;
3138 margin_reg_value = 154;
3139 /* FIXME extra to set for 1200 */
3140 break;
3141 default:
3142 return 0;
3143 }
3144 break;
bd60018a 3145 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3146 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3148 deemph_reg_value = 85;
3149 margin_reg_value = 78;
3150 break;
bd60018a 3151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3152 deemph_reg_value = 85;
3153 margin_reg_value = 116;
3154 break;
bd60018a 3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3156 deemph_reg_value = 85;
3157 margin_reg_value = 154;
3158 break;
3159 default:
3160 return 0;
3161 }
3162 break;
bd60018a 3163 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3164 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3165 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3166 deemph_reg_value = 64;
3167 margin_reg_value = 104;
3168 break;
bd60018a 3169 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3170 deemph_reg_value = 64;
3171 margin_reg_value = 154;
3172 break;
3173 default:
3174 return 0;
3175 }
3176 break;
bd60018a 3177 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3178 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3180 deemph_reg_value = 43;
3181 margin_reg_value = 154;
3182 break;
3183 default:
3184 return 0;
3185 }
3186 break;
3187 default:
3188 return 0;
3189 }
3190
a580516d 3191 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3192
3193 /* Clear calc init */
1966e59e
VS
3194 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3195 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3196 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3197 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3198 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3199
3200 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3201 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3202 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3203 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3204 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3205
a02ef3c7
VS
3206 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3207 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3208 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3209 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3210
3211 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3212 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3213 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3214 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3215
e4a1d846 3216 /* Program swing deemph */
f72df8db
VS
3217 for (i = 0; i < 4; i++) {
3218 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3219 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3220 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3221 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3222 }
e4a1d846
CML
3223
3224 /* Program swing margin */
f72df8db
VS
3225 for (i = 0; i < 4; i++) {
3226 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3227 val &= ~DPIO_SWING_MARGIN000_MASK;
3228 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3229 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3230 }
e4a1d846
CML
3231
3232 /* Disable unique transition scale */
f72df8db
VS
3233 for (i = 0; i < 4; i++) {
3234 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3235 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3236 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3237 }
e4a1d846
CML
3238
3239 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3240 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3241 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3242 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3243
3244 /*
3245 * The document said it needs to set bit 27 for ch0 and bit 26
3246 * for ch1. Might be a typo in the doc.
3247 * For now, for this unique transition scale selection, set bit
3248 * 27 for ch0 and ch1.
3249 */
f72df8db
VS
3250 for (i = 0; i < 4; i++) {
3251 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3252 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3253 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3254 }
e4a1d846 3255
f72df8db
VS
3256 for (i = 0; i < 4; i++) {
3257 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3258 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3259 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3260 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3261 }
e4a1d846
CML
3262 }
3263
3264 /* Start swing calculation */
1966e59e
VS
3265 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3266 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3267 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3268
3269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3270 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3272
3273 /* LRC Bypass */
3274 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3275 val |= DPIO_LRC_BYPASS;
3276 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3277
a580516d 3278 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3279
3280 return 0;
3281}
3282
a4fc5ed6 3283static void
0301b3ac
JN
3284intel_get_adjust_train(struct intel_dp *intel_dp,
3285 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3286{
3287 uint8_t v = 0;
3288 uint8_t p = 0;
3289 int lane;
1a2eb460
KP
3290 uint8_t voltage_max;
3291 uint8_t preemph_max;
a4fc5ed6 3292
33a34e4e 3293 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3294 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3295 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3296
3297 if (this_v > v)
3298 v = this_v;
3299 if (this_p > p)
3300 p = this_p;
3301 }
3302
1a2eb460 3303 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3304 if (v >= voltage_max)
3305 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3306
1a2eb460
KP
3307 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3308 if (p >= preemph_max)
3309 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3310
3311 for (lane = 0; lane < 4; lane++)
33a34e4e 3312 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3313}
3314
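The helper above folds the per-lane adjust requests into one train_set byte per lane: bits 1:0 hold the voltage-swing level, bits 4:3 the pre-emphasis level, and the MAX_*_REACHED flags are OR-ed in once the source-side maximum is hit. A minimal standalone sketch of that packing, with the masks redefined locally to mirror the DPCD TRAINING_LANEx_SET layout (pack_train_set() is an illustrative name, not a driver function):

#include <stdint.h>
#include <stdio.h>

#define VSWING_MASK          0x03          /* bits 1:0 - voltage swing level */
#define MAX_SWING_REACHED    (1 << 2)      /* bit 2    - source hit its max  */
#define PREEMPH_SHIFT        3
#define PREEMPH_MASK         (0x03 << PREEMPH_SHIFT) /* bits 4:3 - pre-emphasis */
#define MAX_PREEMPH_REACHED  (1 << 5)      /* bit 5    - source hit its max  */

static uint8_t pack_train_set(uint8_t vswing, uint8_t preemph,
			      uint8_t vswing_max, uint8_t preemph_max)
{
	uint8_t v = vswing & VSWING_MASK;
	uint8_t p = (preemph << PREEMPH_SHIFT) & PREEMPH_MASK;

	if (vswing >= vswing_max)
		v = (vswing_max & VSWING_MASK) | MAX_SWING_REACHED;
	if (preemph >= preemph_max)
		p = ((preemph_max << PREEMPH_SHIFT) & PREEMPH_MASK) |
		    MAX_PREEMPH_REACHED;

	return v | p;
}

int main(void)
{
	/* sink asks for swing 2 / pre-emphasis 1, source caps at 3 / 2 */
	printf("train_set = 0x%02x\n", pack_train_set(2, 1, 3, 2));
	return 0;
}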
3315static uint32_t
5829975c 3316gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3317{
3cf2efb1 3318 uint32_t signal_levels = 0;
a4fc5ed6 3319
3cf2efb1 3320 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3322 default:
3323 signal_levels |= DP_VOLTAGE_0_4;
3324 break;
bd60018a 3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3326 signal_levels |= DP_VOLTAGE_0_6;
3327 break;
bd60018a 3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3329 signal_levels |= DP_VOLTAGE_0_8;
3330 break;
bd60018a 3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3332 signal_levels |= DP_VOLTAGE_1_2;
3333 break;
3334 }
3cf2efb1 3335 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3336 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3337 default:
3338 signal_levels |= DP_PRE_EMPHASIS_0;
3339 break;
bd60018a 3340 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3341 signal_levels |= DP_PRE_EMPHASIS_3_5;
3342 break;
bd60018a 3343 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3344 signal_levels |= DP_PRE_EMPHASIS_6;
3345 break;
bd60018a 3346 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3347 signal_levels |= DP_PRE_EMPHASIS_9_5;
3348 break;
3349 }
3350 return signal_levels;
3351}
3352
e3421a18
ZW
3353/* Gen6's DP voltage swing and pre-emphasis control */
3354static uint32_t
5829975c 3355gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3356{
3c5a62b5
YL
3357 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3358 DP_TRAIN_PRE_EMPHASIS_MASK);
3359 switch (signal_levels) {
bd60018a
SJ
3360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3362 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3364 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3367 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3370 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3373 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3374 default:
3c5a62b5
YL
3375 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3376 "0x%x\n", signal_levels);
3377 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3378 }
3379}
3380
1a2eb460
KP
3381/* Gen7's DP voltage swing and pre-emphasis control */
3382static uint32_t
5829975c 3383gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3384{
3385 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3386 DP_TRAIN_PRE_EMPHASIS_MASK);
3387 switch (signal_levels) {
bd60018a 3388 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3389 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3391 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3393 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3394
bd60018a 3395 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3396 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3397 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3398 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3399
bd60018a 3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3401 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3402 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3403 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3404
3405 default:
3406 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3407 "0x%x\n", signal_levels);
3408 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3409 }
3410}
3411
d6c0d722
PZ
3412/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3413static uint32_t
5829975c 3414hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3415{
d6c0d722
PZ
3416 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3417 DP_TRAIN_PRE_EMPHASIS_MASK);
3418 switch (signal_levels) {
bd60018a 3419 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3420 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3422 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3423 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3424 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3426 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3427
bd60018a 3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3429 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3431 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3433 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3434
bd60018a 3435 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3436 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3437 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3438 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3439
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3441 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3442 default:
3443 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3444 "0x%x\n", signal_levels);
c5fe6a06 3445 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3446 }
a4fc5ed6
KP
3447}
3448
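The switch above enumerates the ten valid (voltage swing, pre-emphasis) combinations in order, so the DDI buffer translation index is simply a per-swing base offset plus the pre-emphasis level. A compact standalone restatement of that mapping (illustration only; hsw_ddi_buf_trans_index() is a made-up name, and the bounds check plays the role of the default: case):

#include <stdio.h>

static int hsw_ddi_buf_trans_index(unsigned int vswing, unsigned int preemph)
{
	/* entries per swing level: 4, 3, 2, 1 -> bases 0, 4, 7, 9 */
	static const int base[4] = { 0, 4, 7, 9 };
	static const unsigned int max_preemph[4] = { 3, 2, 1, 0 };

	if (vswing > 3 || preemph > max_preemph[vswing])
		return 0;	/* same fallback as the default: case above */

	return base[vswing] + preemph;
}

int main(void)
{
	/* swing level 2, pre-emphasis level 1 -> entry 8, matching the switch */
	printf("index = %d\n", hsw_ddi_buf_trans_index(2, 1));
	return 0;
}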
5829975c 3449static void bxt_signal_levels(struct intel_dp *intel_dp)
96fb9f9b
VK
3450{
3451 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3452 enum port port = dport->port;
3453 struct drm_device *dev = dport->base.base.dev;
3454 struct intel_encoder *encoder = &dport->base;
3455 uint8_t train_set = intel_dp->train_set[0];
3456 uint32_t level = 0;
3457
3458 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3459 DP_TRAIN_PRE_EMPHASIS_MASK);
3460 switch (signal_levels) {
3461 default:
3462 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
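 /* no break here: unsupported combinations fall through and use level 0 */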
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3464 level = 0;
3465 break;
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3467 level = 1;
3468 break;
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3470 level = 2;
3471 break;
3472 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3473 level = 3;
3474 break;
3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3476 level = 4;
3477 break;
3478 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3479 level = 5;
3480 break;
3481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3482 level = 6;
3483 break;
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3485 level = 7;
3486 break;
3487 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3488 level = 8;
3489 break;
3490 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3491 level = 9;
3492 break;
3493 }
3494
3495 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3496}
3497
f0a3424e
PZ
3498/* Properly updates "DP" with the correct signal levels. */
3499static void
3500intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3501{
3502 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3503 enum port port = intel_dig_port->port;
f0a3424e
PZ
3504 struct drm_device *dev = intel_dig_port->base.base.dev;
3505 uint32_t signal_levels, mask;
3506 uint8_t train_set = intel_dp->train_set[0];
3507
96fb9f9b
VK
3508 if (IS_BROXTON(dev)) {
3509 signal_levels = 0;
5829975c 3510 bxt_signal_levels(intel_dp);
96fb9f9b
VK
3511 mask = 0;
3512 } else if (HAS_DDI(dev)) {
5829975c 3513 signal_levels = hsw_signal_levels(train_set);
f0a3424e 3514 mask = DDI_BUF_EMP_MASK;
e4a1d846 3515 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3516 signal_levels = chv_signal_levels(intel_dp);
e4a1d846 3517 mask = 0;
e2fa6fba 3518 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3519 signal_levels = vlv_signal_levels(intel_dp);
e2fa6fba 3520 mask = 0;
bc7d38a4 3521 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3522 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3523 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3524 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3525 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3526 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3527 } else {
5829975c 3528 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3529 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3530 }
3531
96fb9f9b
VK
3532 if (mask)
3533 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3534
3535 DRM_DEBUG_KMS("Using vswing level %d\n",
3536 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3537 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3538 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3539 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3540
3541 *DP = (*DP & ~mask) | signal_levels;
3542}
3543
a4fc5ed6 3544static bool
ea5b213a 3545intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3546 uint32_t *DP,
58e10eb9 3547 uint8_t dp_train_pat)
a4fc5ed6 3548{
174edf1f
PZ
3549 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3550 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3551 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3552 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3553 int ret, len;
a4fc5ed6 3554
7b13b58a 3555 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3556
70aff66c 3557 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3558 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3559
2cdfe6c8
JN
3560 buf[0] = dp_train_pat;
3561 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3562 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3563 /* don't write DP_TRAINING_LANEx_SET on disable */
3564 len = 1;
3565 } else {
3566 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3567 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3568 len = intel_dp->lane_count + 1;
47ea7542 3569 }
a4fc5ed6 3570
9d1a1031
JN
3571 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3572 buf, len);
2cdfe6c8
JN
3573
3574 return ret == len;
a4fc5ed6
KP
3575}
3576
70aff66c
JN
3577static bool
3578intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3579 uint8_t dp_train_pat)
3580{
4e96c977
MK
3581 if (!intel_dp->train_set_valid)
3582 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3583 intel_dp_set_signal_levels(intel_dp, DP);
3584 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3585}
3586
3587static bool
3588intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3589 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3590{
3591 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3592 struct drm_device *dev = intel_dig_port->base.base.dev;
3593 struct drm_i915_private *dev_priv = dev->dev_private;
3594 int ret;
3595
3596 intel_get_adjust_train(intel_dp, link_status);
3597 intel_dp_set_signal_levels(intel_dp, DP);
3598
3599 I915_WRITE(intel_dp->output_reg, *DP);
3600 POSTING_READ(intel_dp->output_reg);
3601
9d1a1031
JN
3602 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3603 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3604
3605 return ret == intel_dp->lane_count;
3606}
3607
3ab9c637
ID
3608static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3609{
3610 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3611 struct drm_device *dev = intel_dig_port->base.base.dev;
3612 struct drm_i915_private *dev_priv = dev->dev_private;
3613 enum port port = intel_dig_port->port;
3614 uint32_t val;
3615
3616 if (!HAS_DDI(dev))
3617 return;
3618
3619 val = I915_READ(DP_TP_CTL(port));
3620 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3621 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3622 I915_WRITE(DP_TP_CTL(port), val);
3623
3624 /*
3625 * On PORT_A we can have only eDP in SST mode. There, the only reason
3626 * we need to set idle transmission mode is to work around a HW issue
3627 * where we enable the pipe while not in idle link-training mode.
3628 * In this case there is a requirement to wait for a minimum number of
3629 * idle patterns to be sent.
3630 */
3631 if (port == PORT_A)
3632 return;
3633
3634 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3635 1))
3636 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3637}
3638
33a34e4e 3639/* Enable corresponding port and start training pattern 1 */
c19b0669 3640void
33a34e4e 3641intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3642{
da63a9f2 3643 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3644 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3645 int i;
3646 uint8_t voltage;
cdb0e95b 3647 int voltage_tries, loop_tries;
ea5b213a 3648 uint32_t DP = intel_dp->DP;
6aba5b6c 3649 uint8_t link_config[2];
a4fc5ed6 3650
affa9354 3651 if (HAS_DDI(dev))
c19b0669
PZ
3652 intel_ddi_prepare_link_retrain(encoder);
3653
3cf2efb1 3654 /* Write the link configuration data */
6aba5b6c
JN
3655 link_config[0] = intel_dp->link_bw;
3656 link_config[1] = intel_dp->lane_count;
3657 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3658 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3659 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3660 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3661 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3662 &intel_dp->rate_select, 1);
6aba5b6c
JN
3663
3664 link_config[0] = 0;
3665 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3666 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3667
3668 DP |= DP_PORT_EN;
1a2eb460 3669
70aff66c
JN
3670 /* clock recovery */
3671 if (!intel_dp_reset_link_train(intel_dp, &DP,
3672 DP_TRAINING_PATTERN_1 |
3673 DP_LINK_SCRAMBLING_DISABLE)) {
3674 DRM_ERROR("failed to enable link training\n");
3675 return;
3676 }
3677
a4fc5ed6 3678 voltage = 0xff;
cdb0e95b
KP
3679 voltage_tries = 0;
3680 loop_tries = 0;
a4fc5ed6 3681 for (;;) {
70aff66c 3682 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3683
a7c9655f 3684 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3685 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3686 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3687 break;
93f62dad 3688 }
a4fc5ed6 3689
01916270 3690 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3691 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3692 break;
3693 }
3694
4e96c977
MK
3695 /*
3696 * if we used previously trained voltage and pre-emphasis values
3697 * and we don't get clock recovery, reset link training values
3698 */
3699 if (intel_dp->train_set_valid) {
3700 DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3701 /* clear the flag as we are not reusing train set */
3702 intel_dp->train_set_valid = false;
3703 if (!intel_dp_reset_link_train(intel_dp, &DP,
3704 DP_TRAINING_PATTERN_1 |
3705 DP_LINK_SCRAMBLING_DISABLE)) {
3706 DRM_ERROR("failed to enable link training\n");
3707 return;
3708 }
3709 continue;
3710 }
3711
3cf2efb1
CW
3712 /* Check to see if we've tried the max voltage */
3713 for (i = 0; i < intel_dp->lane_count; i++)
3714 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3715 break;
3b4f819d 3716 if (i == intel_dp->lane_count) {
b06fbda3
DV
3717 ++loop_tries;
3718 if (loop_tries == 5) {
3def84b3 3719 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3720 break;
3721 }
70aff66c
JN
3722 intel_dp_reset_link_train(intel_dp, &DP,
3723 DP_TRAINING_PATTERN_1 |
3724 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3725 voltage_tries = 0;
3726 continue;
3727 }
a4fc5ed6 3728
3cf2efb1 3729 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3730 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3731 ++voltage_tries;
b06fbda3 3732 if (voltage_tries == 5) {
3def84b3 3733 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3734 break;
3735 }
3736 } else
3737 voltage_tries = 0;
3738 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3739
70aff66c
JN
3740 /* Update training set as requested by target */
3741 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3742 DRM_ERROR("failed to update link training\n");
3743 break;
3744 }
a4fc5ed6
KP
3745 }
3746
33a34e4e
JB
3747 intel_dp->DP = DP;
3748}
3749
c19b0669 3750void
33a34e4e
JB
3751intel_dp_complete_link_train(struct intel_dp *intel_dp)
3752{
33a34e4e 3753 bool channel_eq = false;
37f80975 3754 int tries, cr_tries;
33a34e4e 3755 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3756 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3757
3758 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3759 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3760 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3761
a4fc5ed6 3762 /* channel equalization */
70aff66c 3763 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3764 training_pattern |
70aff66c
JN
3765 DP_LINK_SCRAMBLING_DISABLE)) {
3766 DRM_ERROR("failed to start channel equalization\n");
3767 return;
3768 }
3769
a4fc5ed6 3770 tries = 0;
37f80975 3771 cr_tries = 0;
a4fc5ed6
KP
3772 channel_eq = false;
3773 for (;;) {
70aff66c 3774 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3775
37f80975
JB
3776 if (cr_tries > 5) {
3777 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3778 break;
3779 }
3780
a7c9655f 3781 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3782 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3783 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3784 break;
70aff66c 3785 }
a4fc5ed6 3786
37f80975 3787 /* Make sure clock is still ok */
01916270 3788 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3789 intel_dp->train_set_valid = false;
37f80975 3790 intel_dp_start_link_train(intel_dp);
70aff66c 3791 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3792 training_pattern |
70aff66c 3793 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3794 cr_tries++;
3795 continue;
3796 }
3797
1ffdff13 3798 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3799 channel_eq = true;
3800 break;
3801 }
a4fc5ed6 3802
37f80975
JB
3803 /* Try 5 times, then try clock recovery if that fails */
3804 if (tries > 5) {
4e96c977 3805 intel_dp->train_set_valid = false;
37f80975 3806 intel_dp_start_link_train(intel_dp);
70aff66c 3807 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3808 training_pattern |
70aff66c 3809 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3810 tries = 0;
3811 cr_tries++;
3812 continue;
3813 }
a4fc5ed6 3814
70aff66c
JN
3815 /* Update training set as requested by target */
3816 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3817 DRM_ERROR("failed to update link training\n");
3818 break;
3819 }
3cf2efb1 3820 ++tries;
869184a6 3821 }
3cf2efb1 3822
3ab9c637
ID
3823 intel_dp_set_idle_link_train(intel_dp);
3824
3825 intel_dp->DP = DP;
3826
4e96c977 3827 if (channel_eq) {
5fa836a9 3828 intel_dp->train_set_valid = true;
07f42258 3829 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3830 }
3ab9c637
ID
3831}
3832
3833void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3834{
70aff66c 3835 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3836 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3837}
3838
3839static void
ea5b213a 3840intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3841{
da63a9f2 3842 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3843 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3844 enum port port = intel_dig_port->port;
da63a9f2 3845 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3846 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3847 uint32_t DP = intel_dp->DP;
a4fc5ed6 3848
bc76e320 3849 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3850 return;
3851
0c33d8d7 3852 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3853 return;
3854
28c97730 3855 DRM_DEBUG_KMS("\n");
32f9d658 3856
39e5fa88
VS
3857 if ((IS_GEN7(dev) && port == PORT_A) ||
3858 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3859 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3860 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3861 } else {
aad3d14d
VS
3862 if (IS_CHERRYVIEW(dev))
3863 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3864 else
3865 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3866 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3867 }
1612c8bd 3868 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3869 POSTING_READ(intel_dp->output_reg);
5eb08b69 3870
1612c8bd
VS
3871 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3872 I915_WRITE(intel_dp->output_reg, DP);
3873 POSTING_READ(intel_dp->output_reg);
3874
3875 /*
3876 * HW workaround for IBX, we need to move the port
3877 * to transcoder A after disabling it to allow the
3878 * matching HDMI port to be enabled on transcoder A.
3879 */
3880 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3881 /* always enable with pattern 1 (as per spec) */
3882 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3883 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3884 I915_WRITE(intel_dp->output_reg, DP);
3885 POSTING_READ(intel_dp->output_reg);
3886
3887 DP &= ~DP_PORT_EN;
5bddd17f 3888 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3889 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3890 }
3891
f01eca2e 3892 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3893}
3894
26d61aad
KP
3895static bool
3896intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3897{
a031d709
RV
3898 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3899 struct drm_device *dev = dig_port->base.base.dev;
3900 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3901 uint8_t rev;
a031d709 3902
9d1a1031
JN
3903 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3904 sizeof(intel_dp->dpcd)) < 0)
edb39244 3905 return false; /* aux transfer failed */
92fd8fd1 3906
a8e98153 3907 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3908
edb39244
AJ
3909 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3910 return false; /* DPCD not present */
3911
2293bb5c
SK
3912 /* Check if the panel supports PSR */
3913 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3914 if (is_edp(intel_dp)) {
9d1a1031
JN
3915 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3916 intel_dp->psr_dpcd,
3917 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3918 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3919 dev_priv->psr.sink_support = true;
50003939 3920 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3921 }
474d1ec4
SJ
3922
3923 if (INTEL_INFO(dev)->gen >= 9 &&
3924 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3925 uint8_t frame_sync_cap;
3926
3927 dev_priv->psr.sink_support = true;
3928 intel_dp_dpcd_read_wake(&intel_dp->aux,
3929 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3930 &frame_sync_cap, 1);
3931 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3932 /* PSR2 needs frame sync as well */
3933 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3934 DRM_DEBUG_KMS("PSR2 %s on sink",
3935 dev_priv->psr.psr2_support ? "supported" : "not supported");
3936 }
50003939
JN
3937 }
3938
7809a611 3939 /* Training Pattern 3 support, both source and sink */
06ea66b6 3940 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3941 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3942 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3943 intel_dp->use_tps3 = true;
f8d8a672 3944 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3945 } else
3946 intel_dp->use_tps3 = false;
3947
fc0f8e25
SJ
3948 /* Intermediate frequency support */
3949 if (is_edp(intel_dp) &&
3950 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3951 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3952 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3953 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3954 int i;
3955
fc0f8e25
SJ
3956 intel_dp_dpcd_read_wake(&intel_dp->aux,
3957 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3958 sink_rates,
3959 sizeof(sink_rates));
ea2d8a42 3960
94ca719e
VS
3961 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3962 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3963
3964 if (val == 0)
3965 break;
3966
af77b974
SJ
3967 /* Value read is in units of 200 kHz; drm stores link rates in 10 kHz units */
3968 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3969 }
94ca719e 3970 intel_dp->num_sink_rates = i;
fc0f8e25 3971 }
0336400e
VS
3972
3973 intel_dp_print_rates(intel_dp);
3974
edb39244
AJ
3975 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3976 DP_DWN_STRM_PORT_PRESENT))
3977 return true; /* native DP sink */
3978
3979 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3980 return true; /* no per-port downstream info */
3981
9d1a1031
JN
3982 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3983 intel_dp->downstream_ports,
3984 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3985 return false; /* downstream port status fetch failed */
3986
3987 return true;
92fd8fd1
KP
3988}
3989
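For eDP 1.4 sinks, the loop above converts each DP_SUPPORTED_LINK_RATES entry, reported in 200 kHz units, into the 10 kHz units used elsewhere in the driver. A standalone worked example of that conversion (the table values are illustrative, chosen to correspond to the common 1.62/2.7/5.4 GHz link rates):

#include <stdio.h>

int main(void)
{
	/* entries as a sink might report them, 200 kHz units, 0-terminated */
	const unsigned int sink_rates[] = { 8100, 13500, 27000, 0 };
	int i;

	for (i = 0; sink_rates[i] != 0; i++) {
		/* same arithmetic as the driver: to kHz, then to 10 kHz units */
		int rate = (sink_rates[i] * 200) / 10;

		printf("entry %d: %u x 200 kHz -> %d (10 kHz units)\n",
		       i, sink_rates[i], rate);
	}
	return 0;
}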
0d198328
AJ
3990static void
3991intel_dp_probe_oui(struct intel_dp *intel_dp)
3992{
3993 u8 buf[3];
3994
3995 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3996 return;
3997
9d1a1031 3998 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3999 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4000 buf[0], buf[1], buf[2]);
4001
9d1a1031 4002 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4003 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4004 buf[0], buf[1], buf[2]);
4005}
4006
0e32b39c
DA
4007static bool
4008intel_dp_probe_mst(struct intel_dp *intel_dp)
4009{
4010 u8 buf[1];
4011
4012 if (!intel_dp->can_mst)
4013 return false;
4014
4015 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4016 return false;
4017
0e32b39c
DA
4018 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4019 if (buf[0] & DP_MST_CAP) {
4020 DRM_DEBUG_KMS("Sink is MST capable\n");
4021 intel_dp->is_mst = true;
4022 } else {
4023 DRM_DEBUG_KMS("Sink is not MST capable\n");
4024 intel_dp->is_mst = false;
4025 }
4026 }
0e32b39c
DA
4027
4028 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4029 return intel_dp->is_mst;
4030}
4031
d2e216d0
RV
4032int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4033{
4034 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4035 struct drm_device *dev = intel_dig_port->base.base.dev;
4036 struct intel_crtc *intel_crtc =
4037 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
4038 u8 buf;
4039 int test_crc_count;
4040 int attempts = 6;
4373f0f2 4041 int ret = 0;
d2e216d0 4042
4373f0f2 4043 hsw_disable_ips(intel_crtc);
d2e216d0 4044
4373f0f2
PZ
4045 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4046 ret = -EIO;
4047 goto out;
4048 }
4049
4050 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4051 ret = -ENOTTY;
4052 goto out;
4053 }
d2e216d0 4054
4373f0f2
PZ
4055 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4056 ret = -EIO;
4057 goto out;
4058 }
1dda5f93 4059
9d1a1031 4060 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4373f0f2
PZ
4061 buf | DP_TEST_SINK_START) < 0) {
4062 ret = -EIO;
4063 goto out;
4064 }
4065
4066 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4067 ret = -EIO;
4068 goto out;
4069 }
d2e216d0 4070
ad9dc91b 4071 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4072
ad9dc91b 4073 do {
1dda5f93 4074 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4075 DP_TEST_SINK_MISC, &buf) < 0) {
4076 ret = -EIO;
4077 goto out;
4078 }
ad9dc91b
RV
4079 intel_wait_for_vblank(dev, intel_crtc->pipe);
4080 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4081
4082 if (attempts == 0) {
90bd1f46 4083 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4373f0f2
PZ
4084 ret = -ETIMEDOUT;
4085 goto out;
ad9dc91b 4086 }
d2e216d0 4087
4373f0f2
PZ
4088 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4089 ret = -EIO;
4090 goto out;
4091 }
d2e216d0 4092
4373f0f2
PZ
4093 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4094 ret = -EIO;
4095 goto out;
4096 }
1dda5f93 4097 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4373f0f2
PZ
4098 buf & ~DP_TEST_SINK_START) < 0) {
4099 ret = -EIO;
4100 goto out;
4101 }
4102out:
4103 hsw_enable_ips(intel_crtc);
4104 return ret;
d2e216d0
RV
4105}
4106
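intel_dp_sink_crc() above returns six bytes: the R/Cr, G/Y and B/Cb CRC pairs read from the sink's TEST_CRC registers. A possible consumer along the lines of a debugfs hook, sketched here to show the expected buffer size and error handling (the function name and the seq_file plumbing are assumptions for illustration, not code from this file):

static int i915_sink_crc_show_sketch(struct seq_file *m,
				     struct intel_dp *intel_dp)
{
	u8 crc[6];	/* R/Cr, G/Y, B/Cb - two bytes each */
	int ret;

	ret = intel_dp_sink_crc(intel_dp, crc);
	if (ret)
		return ret;	/* -EIO, -ENOTTY or -ETIMEDOUT from above */

	seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
		   crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
	return 0;
}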
a60f0e38
JB
4107static bool
4108intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4109{
9d1a1031
JN
4110 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4111 DP_DEVICE_SERVICE_IRQ_VECTOR,
4112 sink_irq_vector, 1) == 1;
a60f0e38
JB
4113}
4114
0e32b39c
DA
4115static bool
4116intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4117{
4118 int ret;
4119
4120 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4121 DP_SINK_COUNT_ESI,
4122 sink_irq_vector, 14);
4123 if (ret != 14)
4124 return false;
4125
4126 return true;
4127}
4128
c5d5ab7a
TP
4129static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4130{
4131 uint8_t test_result = DP_TEST_ACK;
4132 return test_result;
4133}
4134
4135static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4136{
4137 uint8_t test_result = DP_TEST_NAK;
4138 return test_result;
4139}
4140
4141static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4142{
c5d5ab7a 4143 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4144 struct intel_connector *intel_connector = intel_dp->attached_connector;
4145 struct drm_connector *connector = &intel_connector->base;
4146
4147 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4148 connector->edid_corrupt ||
559be30c
TP
4149 intel_dp->aux.i2c_defer_count > 6) {
4150 /* Check EDID read for NACKs, DEFERs and corruption
4151 * (DP CTS 1.2 Core r1.1)
4152 * 4.2.2.4 : Failed EDID read, I2C_NAK
4153 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4154 * 4.2.2.6 : EDID corruption detected
4155 * Use failsafe mode for all cases
4156 */
4157 if (intel_dp->aux.i2c_nack_count > 0 ||
4158 intel_dp->aux.i2c_defer_count > 0)
4159 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4160 intel_dp->aux.i2c_nack_count,
4161 intel_dp->aux.i2c_defer_count);
4162 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4163 } else {
4164 if (!drm_dp_dpcd_write(&intel_dp->aux,
4165 DP_TEST_EDID_CHECKSUM,
4166 &intel_connector->detect_edid->checksum,
5a1cc655 4167 1))
559be30c
TP
4168 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4169
4170 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4171 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4172 }
4173
4174 /* Set test active flag here so userspace doesn't interrupt things */
4175 intel_dp->compliance_test_active = 1;
4176
c5d5ab7a
TP
4177 return test_result;
4178}
4179
4180static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4181{
c5d5ab7a
TP
4182 uint8_t test_result = DP_TEST_NAK;
4183 return test_result;
4184}
4185
4186static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
a60f0e38 4187{
c5d5ab7a
TP
4188 uint8_t response = DP_TEST_NAK;
4189 uint8_t rxdata = 0;
4190 int status = 0;
4191
559be30c 4192 intel_dp->compliance_test_active = 0;
c5d5ab7a 4193 intel_dp->compliance_test_type = 0;
559be30c
TP
4194 intel_dp->compliance_test_data = 0;
4195
c5d5ab7a
TP
4196 intel_dp->aux.i2c_nack_count = 0;
4197 intel_dp->aux.i2c_defer_count = 0;
4198
4199 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4200 if (status <= 0) {
4201 DRM_DEBUG_KMS("Could not read test request from sink\n");
4202 goto update_status;
4203 }
4204
4205 switch (rxdata) {
4206 case DP_TEST_LINK_TRAINING:
4207 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4208 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4209 response = intel_dp_autotest_link_training(intel_dp);
4210 break;
4211 case DP_TEST_LINK_VIDEO_PATTERN:
4212 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4213 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4214 response = intel_dp_autotest_video_pattern(intel_dp);
4215 break;
4216 case DP_TEST_LINK_EDID_READ:
4217 DRM_DEBUG_KMS("EDID test requested\n");
4218 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4219 response = intel_dp_autotest_edid(intel_dp);
4220 break;
4221 case DP_TEST_LINK_PHY_TEST_PATTERN:
4222 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4223 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4224 response = intel_dp_autotest_phy_pattern(intel_dp);
4225 break;
4226 default:
4227 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4228 break;
4229 }
4230
4231update_status:
4232 status = drm_dp_dpcd_write(&intel_dp->aux,
4233 DP_TEST_RESPONSE,
4234 &response, 1);
4235 if (status <= 0)
4236 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4237}
4238
0e32b39c
DA
4239static int
4240intel_dp_check_mst_status(struct intel_dp *intel_dp)
4241{
4242 bool bret;
4243
4244 if (intel_dp->is_mst) {
4245 u8 esi[16] = { 0 };
4246 int ret = 0;
4247 int retry;
4248 bool handled;
4249 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4250go_again:
4251 if (bret == true) {
4252
4253 /* check link status - esi[10] = 0x200c */
4254 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4255 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4256 intel_dp_start_link_train(intel_dp);
4257 intel_dp_complete_link_train(intel_dp);
4258 intel_dp_stop_link_train(intel_dp);
4259 }
4260
6f34cc39 4261 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4262 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4263
4264 if (handled) {
4265 for (retry = 0; retry < 3; retry++) {
4266 int wret;
4267 wret = drm_dp_dpcd_write(&intel_dp->aux,
4268 DP_SINK_COUNT_ESI+1,
4269 &esi[1], 3);
4270 if (wret == 3) {
4271 break;
4272 }
4273 }
4274
4275 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4276 if (bret == true) {
6f34cc39 4277 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4278 goto go_again;
4279 }
4280 } else
4281 ret = 0;
4282
4283 return ret;
4284 } else {
4285 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4286 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4287 intel_dp->is_mst = false;
4288 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4289 /* send a hotplug event */
4290 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4291 }
4292 }
4293 return -EINVAL;
4294}
4295
a4fc5ed6
KP
4296/*
4297 * According to DP spec
4298 * 5.1.2:
4299 * 1. Read DPCD
4300 * 2. Configure link according to Receiver Capabilities
4301 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4302 * 4. Check link status on receipt of hot-plug interrupt
4303 */
a5146200 4304static void
ea5b213a 4305intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4306{
5b215bcf 4307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4308 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4309 u8 sink_irq_vector;
93f62dad 4310 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4311
5b215bcf
DA
4312 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4313
da63a9f2 4314 if (!intel_encoder->connectors_active)
d2b996ac 4315 return;
59cd09e1 4316
da63a9f2 4317 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4318 return;
4319
1a125d8a
ID
4320 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4321 return;
4322
92fd8fd1 4323 /* Try to read receiver status if the link appears to be up */
93f62dad 4324 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4325 return;
4326 }
4327
92fd8fd1 4328 /* Now read the DPCD to see if it's actually running */
26d61aad 4329 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4330 return;
4331 }
4332
a60f0e38
JB
4333 /* Try to read the source of the interrupt */
4334 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4335 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4336 /* Clear interrupt source */
9d1a1031
JN
4337 drm_dp_dpcd_writeb(&intel_dp->aux,
4338 DP_DEVICE_SERVICE_IRQ_VECTOR,
4339 sink_irq_vector);
a60f0e38
JB
4340
4341 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4342 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4343 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4344 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4345 }
4346
1ffdff13 4347 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4348 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4349 intel_encoder->base.name);
33a34e4e
JB
4350 intel_dp_start_link_train(intel_dp);
4351 intel_dp_complete_link_train(intel_dp);
3ab9c637 4352 intel_dp_stop_link_train(intel_dp);
33a34e4e 4353 }
a4fc5ed6 4354}
a4fc5ed6 4355
caf9ab24 4356/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4357static enum drm_connector_status
26d61aad 4358intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4359{
caf9ab24 4360 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4361 uint8_t type;
4362
4363 if (!intel_dp_get_dpcd(intel_dp))
4364 return connector_status_disconnected;
4365
4366 /* if there's no downstream port, we're done */
4367 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4368 return connector_status_connected;
caf9ab24
AJ
4369
4370 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4371 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4372 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4373 uint8_t reg;
9d1a1031
JN
4374
4375 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4376 &reg, 1) < 0)
caf9ab24 4377 return connector_status_unknown;
9d1a1031 4378
23235177
AJ
4379 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4380 : connector_status_disconnected;
caf9ab24
AJ
4381 }
4382
4383 /* If no HPD, poke DDC gently */
0b99836f 4384 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4385 return connector_status_connected;
caf9ab24
AJ
4386
4387 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4388 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4389 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4390 if (type == DP_DS_PORT_TYPE_VGA ||
4391 type == DP_DS_PORT_TYPE_NON_EDID)
4392 return connector_status_unknown;
4393 } else {
4394 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4395 DP_DWN_STRM_PORT_TYPE_MASK;
4396 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4397 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4398 return connector_status_unknown;
4399 }
caf9ab24
AJ
4400
4401 /* Anything else is out of spec, warn and ignore */
4402 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4403 return connector_status_disconnected;
71ba9000
AJ
4404}
4405
d410b56d
CW
4406static enum drm_connector_status
4407edp_detect(struct intel_dp *intel_dp)
4408{
4409 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4410 enum drm_connector_status status;
4411
4412 status = intel_panel_detect(dev);
4413 if (status == connector_status_unknown)
4414 status = connector_status_connected;
4415
4416 return status;
4417}
4418
5eb08b69 4419static enum drm_connector_status
a9756bb5 4420ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4421{
30add22d 4422 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4423 struct drm_i915_private *dev_priv = dev->dev_private;
4424 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4425
1b469639
DL
4426 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4427 return connector_status_disconnected;
4428
26d61aad 4429 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4430}
4431
2a592bec
DA
4432static int g4x_digital_port_connected(struct drm_device *dev,
4433 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4434{
a4fc5ed6 4435 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4436 uint32_t bit;
5eb08b69 4437
232a6ee9
TP
4438 if (IS_VALLEYVIEW(dev)) {
4439 switch (intel_dig_port->port) {
4440 case PORT_B:
4441 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4442 break;
4443 case PORT_C:
4444 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4445 break;
4446 case PORT_D:
4447 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4448 break;
4449 default:
2a592bec 4450 return -EINVAL;
232a6ee9
TP
4451 }
4452 } else {
4453 switch (intel_dig_port->port) {
4454 case PORT_B:
4455 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4456 break;
4457 case PORT_C:
4458 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4459 break;
4460 case PORT_D:
4461 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4462 break;
4463 default:
2a592bec 4464 return -EINVAL;
232a6ee9 4465 }
a4fc5ed6
KP
4466 }
4467
10f76a38 4468 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4469 return 0;
4470 return 1;
4471}
4472
4473static enum drm_connector_status
4474g4x_dp_detect(struct intel_dp *intel_dp)
4475{
4476 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4477 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4478 int ret;
4479
4480 /* Can't disconnect eDP, but you can close the lid... */
4481 if (is_edp(intel_dp)) {
4482 enum drm_connector_status status;
4483
4484 status = intel_panel_detect(dev);
4485 if (status == connector_status_unknown)
4486 status = connector_status_connected;
4487 return status;
4488 }
4489
4490 ret = g4x_digital_port_connected(dev, intel_dig_port);
4491 if (ret == -EINVAL)
4492 return connector_status_unknown;
4493 else if (ret == 0)
a4fc5ed6
KP
4494 return connector_status_disconnected;
4495
26d61aad 4496 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4497}
4498
8c241fef 4499static struct edid *
beb60608 4500intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4501{
beb60608 4502 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4503
9cd300e0
JN
4504 /* use cached edid if we have one */
4505 if (intel_connector->edid) {
9cd300e0
JN
4506 /* invalid edid */
4507 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4508 return NULL;
4509
55e9edeb 4510 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4511 } else
4512 return drm_get_edid(&intel_connector->base,
4513 &intel_dp->aux.ddc);
4514}
8c241fef 4515
beb60608
CW
4516static void
4517intel_dp_set_edid(struct intel_dp *intel_dp)
4518{
4519 struct intel_connector *intel_connector = intel_dp->attached_connector;
4520 struct edid *edid;
8c241fef 4521
beb60608
CW
4522 edid = intel_dp_get_edid(intel_dp);
4523 intel_connector->detect_edid = edid;
4524
4525 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4526 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4527 else
4528 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4529}
4530
beb60608
CW
4531static void
4532intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4533{
beb60608 4534 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4535
beb60608
CW
4536 kfree(intel_connector->detect_edid);
4537 intel_connector->detect_edid = NULL;
9cd300e0 4538
beb60608
CW
4539 intel_dp->has_audio = false;
4540}
d6f24d0f 4541
beb60608
CW
4542static enum intel_display_power_domain
4543intel_dp_power_get(struct intel_dp *dp)
4544{
4545 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4546 enum intel_display_power_domain power_domain;
4547
4548 power_domain = intel_display_port_power_domain(encoder);
4549 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4550
4551 return power_domain;
4552}
d6f24d0f 4553
beb60608
CW
4554static void
4555intel_dp_power_put(struct intel_dp *dp,
4556 enum intel_display_power_domain power_domain)
4557{
4558 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4559 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4560}
4561
a9756bb5
ZW
4562static enum drm_connector_status
4563intel_dp_detect(struct drm_connector *connector, bool force)
4564{
4565 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4566 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4567 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4568 struct drm_device *dev = connector->dev;
a9756bb5 4569 enum drm_connector_status status;
671dedd2 4570 enum intel_display_power_domain power_domain;
0e32b39c 4571 bool ret;
09b1eb13 4572 u8 sink_irq_vector;
a9756bb5 4573
164c8598 4574 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4575 connector->base.id, connector->name);
beb60608 4576 intel_dp_unset_edid(intel_dp);
164c8598 4577
0e32b39c
DA
4578 if (intel_dp->is_mst) {
4579 /* MST devices are disconnected from a monitor POV */
4580 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4581 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4582 return connector_status_disconnected;
0e32b39c
DA
4583 }
4584
beb60608 4585 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4586
d410b56d
CW
4587 /* Can't disconnect eDP, but you can close the lid... */
4588 if (is_edp(intel_dp))
4589 status = edp_detect(intel_dp);
4590 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4591 status = ironlake_dp_detect(intel_dp);
4592 else
4593 status = g4x_dp_detect(intel_dp);
4594 if (status != connector_status_connected)
c8c8fb33 4595 goto out;
a9756bb5 4596
0d198328
AJ
4597 intel_dp_probe_oui(intel_dp);
4598
0e32b39c
DA
4599 ret = intel_dp_probe_mst(intel_dp);
4600 if (ret) {
4601 /* If we are in MST mode then this connector
4602  * won't appear connected or have anything with EDID on it */
4603 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4604 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4605 status = connector_status_disconnected;
4606 goto out;
4607 }
4608
beb60608 4609 intel_dp_set_edid(intel_dp);
a9756bb5 4610
d63885da
PZ
4611 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4612 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4613 status = connector_status_connected;
4614
09b1eb13
TP
4615 /* Try to read the source of the interrupt */
4616 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4617 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4618 /* Clear interrupt source */
4619 drm_dp_dpcd_writeb(&intel_dp->aux,
4620 DP_DEVICE_SERVICE_IRQ_VECTOR,
4621 sink_irq_vector);
4622
4623 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4624 intel_dp_handle_test_request(intel_dp);
4625 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4626 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4627 }
4628
c8c8fb33 4629out:
beb60608 4630 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4631 return status;
a4fc5ed6
KP
4632}
4633
beb60608
CW
4634static void
4635intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4636{
df0e9248 4637 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4638 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4639 enum intel_display_power_domain power_domain;
a4fc5ed6 4640
beb60608
CW
4641 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4642 connector->base.id, connector->name);
4643 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4644
beb60608
CW
4645 if (connector->status != connector_status_connected)
4646 return;
671dedd2 4647
beb60608
CW
4648 power_domain = intel_dp_power_get(intel_dp);
4649
4650 intel_dp_set_edid(intel_dp);
4651
4652 intel_dp_power_put(intel_dp, power_domain);
4653
4654 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4655 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4656}
4657
4658static int intel_dp_get_modes(struct drm_connector *connector)
4659{
4660 struct intel_connector *intel_connector = to_intel_connector(connector);
4661 struct edid *edid;
4662
4663 edid = intel_connector->detect_edid;
4664 if (edid) {
4665 int ret = intel_connector_update_modes(connector, edid);
4666 if (ret)
4667 return ret;
4668 }
32f9d658 4669
f8779fda 4670 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4671 if (is_edp(intel_attached_dp(connector)) &&
4672 intel_connector->panel.fixed_mode) {
f8779fda 4673 struct drm_display_mode *mode;
beb60608
CW
4674
4675 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4676 intel_connector->panel.fixed_mode);
f8779fda 4677 if (mode) {
32f9d658
ZW
4678 drm_mode_probed_add(connector, mode);
4679 return 1;
4680 }
4681 }
beb60608 4682
32f9d658 4683 return 0;
a4fc5ed6
KP
4684}
4685
1aad7ac0
CW
4686static bool
4687intel_dp_detect_audio(struct drm_connector *connector)
4688{
1aad7ac0 4689 bool has_audio = false;
beb60608 4690 struct edid *edid;
1aad7ac0 4691
beb60608
CW
4692 edid = to_intel_connector(connector)->detect_edid;
4693 if (edid)
1aad7ac0 4694 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4695
1aad7ac0
CW
4696 return has_audio;
4697}
4698
f684960e
CW
4699static int
4700intel_dp_set_property(struct drm_connector *connector,
4701 struct drm_property *property,
4702 uint64_t val)
4703{
e953fd7b 4704 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4705 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4706 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4707 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4708 int ret;
4709
662595df 4710 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4711 if (ret)
4712 return ret;
4713
3f43c48d 4714 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4715 int i = val;
4716 bool has_audio;
4717
4718 if (i == intel_dp->force_audio)
f684960e
CW
4719 return 0;
4720
1aad7ac0 4721 intel_dp->force_audio = i;
f684960e 4722
c3e5f67b 4723 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4724 has_audio = intel_dp_detect_audio(connector);
4725 else
c3e5f67b 4726 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4727
4728 if (has_audio == intel_dp->has_audio)
f684960e
CW
4729 return 0;
4730
1aad7ac0 4731 intel_dp->has_audio = has_audio;
f684960e
CW
4732 goto done;
4733 }
4734
e953fd7b 4735 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4736 bool old_auto = intel_dp->color_range_auto;
4737 uint32_t old_range = intel_dp->color_range;
4738
55bc60db
VS
4739 switch (val) {
4740 case INTEL_BROADCAST_RGB_AUTO:
4741 intel_dp->color_range_auto = true;
4742 break;
4743 case INTEL_BROADCAST_RGB_FULL:
4744 intel_dp->color_range_auto = false;
4745 intel_dp->color_range = 0;
4746 break;
4747 case INTEL_BROADCAST_RGB_LIMITED:
4748 intel_dp->color_range_auto = false;
4749 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4750 break;
4751 default:
4752 return -EINVAL;
4753 }
ae4edb80
DV
4754
4755 if (old_auto == intel_dp->color_range_auto &&
4756 old_range == intel_dp->color_range)
4757 return 0;
4758
e953fd7b
CW
4759 goto done;
4760 }
4761
53b41837
YN
4762 if (is_edp(intel_dp) &&
4763 property == connector->dev->mode_config.scaling_mode_property) {
4764 if (val == DRM_MODE_SCALE_NONE) {
4765 DRM_DEBUG_KMS("no scaling not supported\n");
4766 return -EINVAL;
4767 }
4768
4769 if (intel_connector->panel.fitting_mode == val) {
4770 /* the eDP scaling property is not changed */
4771 return 0;
4772 }
4773 intel_connector->panel.fitting_mode = val;
4774
4775 goto done;
4776 }
4777
f684960e
CW
4778 return -EINVAL;
4779
4780done:
c0c36b94
CW
4781 if (intel_encoder->base.crtc)
4782 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4783
4784 return 0;
4785}
4786
a4fc5ed6 4787static void
73845adf 4788intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4789{
1d508706 4790 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4791
10e972d3 4792 kfree(intel_connector->detect_edid);
beb60608 4793
9cd300e0
JN
4794 if (!IS_ERR_OR_NULL(intel_connector->edid))
4795 kfree(intel_connector->edid);
4796
acd8db10
PZ
4797 /* Can't call is_edp() since the encoder may have been destroyed
4798 * already. */
4799 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4800 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4801
a4fc5ed6 4802 drm_connector_cleanup(connector);
55f78c43 4803 kfree(connector);
a4fc5ed6
KP
4804}
4805
00c09d70 4806void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4807{
da63a9f2
PZ
4808 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4809 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4810
4f71d0cb 4811 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4812 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4813 if (is_edp(intel_dp)) {
4814 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4815 /*
4816		 * vdd might still be enabled due to the delayed vdd off.
4817 * Make sure vdd is actually turned off here.
4818 */
773538e8 4819 pps_lock(intel_dp);
4be73780 4820 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4821 pps_unlock(intel_dp);
4822
01527b31
CT
4823 if (intel_dp->edp_notifier.notifier_call) {
4824 unregister_reboot_notifier(&intel_dp->edp_notifier);
4825 intel_dp->edp_notifier.notifier_call = NULL;
4826 }
bd943159 4827 }
c8bd0e49 4828 drm_encoder_cleanup(encoder);
da63a9f2 4829 kfree(intel_dig_port);
24d05927
DV
4830}
4831
07f9cd0b
ID
4832static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4833{
4834 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4835
4836 if (!is_edp(intel_dp))
4837 return;
4838
951468f3
VS
4839 /*
4840	 * vdd might still be enabled due to the delayed vdd off.
4841 * Make sure vdd is actually turned off here.
4842 */
afa4e53a 4843 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4844 pps_lock(intel_dp);
07f9cd0b 4845 edp_panel_vdd_off_sync(intel_dp);
773538e8 4846 pps_unlock(intel_dp);
07f9cd0b
ID
4847}
4848
49e6bc51
VS
4849static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4850{
4851 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4852 struct drm_device *dev = intel_dig_port->base.base.dev;
4853 struct drm_i915_private *dev_priv = dev->dev_private;
4854 enum intel_display_power_domain power_domain;
4855
4856 lockdep_assert_held(&dev_priv->pps_mutex);
4857
4858 if (!edp_have_panel_vdd(intel_dp))
4859 return;
4860
4861 /*
4862 * The VDD bit needs a power domain reference, so if the bit is
4863 * already enabled when we boot or resume, grab this reference and
4864 * schedule a vdd off, so we don't hold on to the reference
4865 * indefinitely.
4866 */
4867 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4868 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4869 intel_display_power_get(dev_priv, power_domain);
4870
4871 edp_panel_vdd_schedule_off(intel_dp);
4872}
4873
6d93c0c4
ID
4874static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4875{
49e6bc51
VS
4876 struct intel_dp *intel_dp;
4877
4878 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4879 return;
4880
4881 intel_dp = enc_to_intel_dp(encoder);
4882
4883 pps_lock(intel_dp);
4884
4885 /*
4886 * Read out the current power sequencer assignment,
4887 * in case the BIOS did something with it.
4888 */
4889 if (IS_VALLEYVIEW(encoder->dev))
4890 vlv_initial_power_sequencer_setup(intel_dp);
4891
4892 intel_edp_panel_vdd_sanitize(intel_dp);
4893
4894 pps_unlock(intel_dp);
6d93c0c4
ID
4895}
4896
a4fc5ed6 4897static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4898 .dpms = intel_connector_dpms,
a4fc5ed6 4899 .detect = intel_dp_detect,
beb60608 4900 .force = intel_dp_force,
a4fc5ed6 4901 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4902 .set_property = intel_dp_set_property,
2545e4a6 4903 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4904 .destroy = intel_dp_connector_destroy,
c6f95f27 4905 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4906 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4907};
4908
4909static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4910 .get_modes = intel_dp_get_modes,
4911 .mode_valid = intel_dp_mode_valid,
df0e9248 4912 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4913};
4914
a4fc5ed6 4915static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4916 .reset = intel_dp_encoder_reset,
24d05927 4917 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4918};
4919
0e32b39c 4920void
21d40d37 4921intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4922{
0e32b39c 4923 return;
c8110e52 4924}
6207937d 4925
b2c5c181 4926enum irqreturn
13cf5504
DA
4927intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4928{
4929 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4930 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4931 struct drm_device *dev = intel_dig_port->base.base.dev;
4932 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4933 enum intel_display_power_domain power_domain;
b2c5c181 4934 enum irqreturn ret = IRQ_NONE;
1c767b33 4935
0e32b39c
DA
4936 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4937 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4938
7a7f84cc
VS
4939 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4940 /*
4941 * vdd off can generate a long pulse on eDP which
4942 * would require vdd on to handle it, and thus we
4943 * would end up in an endless cycle of
4944 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4945 */
4946 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4947 port_name(intel_dig_port->port));
a8b3d52f 4948 return IRQ_HANDLED;
7a7f84cc
VS
4949 }
4950
26fbb774
VS
4951 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4952 port_name(intel_dig_port->port),
0e32b39c 4953 long_hpd ? "long" : "short");
13cf5504 4954
1c767b33
ID
4955 power_domain = intel_display_port_power_domain(intel_encoder);
4956 intel_display_power_get(dev_priv, power_domain);
4957
0e32b39c 4958 if (long_hpd) {
5fa836a9
MK
4959 /* indicate that we need to restart link training */
4960 intel_dp->train_set_valid = false;
2a592bec
DA
4961
4962 if (HAS_PCH_SPLIT(dev)) {
4963 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4964 goto mst_fail;
4965 } else {
4966 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4967 goto mst_fail;
4968 }
0e32b39c
DA
4969
4970 if (!intel_dp_get_dpcd(intel_dp)) {
4971 goto mst_fail;
4972 }
4973
4974 intel_dp_probe_oui(intel_dp);
4975
4976 if (!intel_dp_probe_mst(intel_dp))
4977 goto mst_fail;
4978
4979 } else {
4980 if (intel_dp->is_mst) {
1c767b33 4981 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4982 goto mst_fail;
4983 }
4984
4985 if (!intel_dp->is_mst) {
4986 /*
4987 * we'll check the link status via the normal hot plug path later -
4988 * but for short hpds we should check it now
4989 */
5b215bcf 4990 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4991 intel_dp_check_link_status(intel_dp);
5b215bcf 4992 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4993 }
4994 }
b2c5c181
DV
4995
4996 ret = IRQ_HANDLED;
4997
1c767b33 4998 goto put_power;
0e32b39c
DA
4999mst_fail:
5000	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5001 if (intel_dp->is_mst) {
5002 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5003 intel_dp->is_mst = false;
5004 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5005 }
1c767b33
ID
5006put_power:
5007 intel_display_power_put(dev_priv, power_domain);
5008
5009 return ret;
13cf5504
DA
5010}
5011
e3421a18
ZW
5012/* Return which DP Port should be selected for Transcoder DP control */
5013int
0206e353 5014intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5015{
5016 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5017 struct intel_encoder *intel_encoder;
5018 struct intel_dp *intel_dp;
e3421a18 5019
fa90ecef
PZ
5020 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5021 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5022
fa90ecef
PZ
5023 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5024 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5025 return intel_dp->output_reg;
e3421a18 5026 }
ea5b213a 5027
e3421a18
ZW
5028 return -1;
5029}
5030
36e83a18 5031/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5032bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5033{
5034 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5035 union child_device_config *p_child;
36e83a18 5036 int i;
5d8a7752
VS
5037 static const short port_mapping[] = {
5038 [PORT_B] = PORT_IDPB,
5039 [PORT_C] = PORT_IDPC,
5040 [PORT_D] = PORT_IDPD,
5041 };
36e83a18 5042
3b32a35b
VS
5043 if (port == PORT_A)
5044 return true;
5045
41aa3448 5046 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5047 return false;
5048
41aa3448
RV
5049 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5050 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5051
5d8a7752 5052 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5053 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5054 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5055 return true;
5056 }
5057 return false;
5058}
5059
0e32b39c 5060void
f684960e
CW
5061intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5062{
53b41837
YN
5063 struct intel_connector *intel_connector = to_intel_connector(connector);
5064
3f43c48d 5065 intel_attach_force_audio_property(connector);
e953fd7b 5066 intel_attach_broadcast_rgb_property(connector);
55bc60db 5067 intel_dp->color_range_auto = true;
53b41837
YN
5068
5069 if (is_edp(intel_dp)) {
5070 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5071 drm_object_attach_property(
5072 &connector->base,
53b41837 5073 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5074 DRM_MODE_SCALE_ASPECT);
5075 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5076 }
f684960e
CW
5077}
5078
dada1a9f
ID
5079static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5080{
5081 intel_dp->last_power_cycle = jiffies;
5082 intel_dp->last_power_on = jiffies;
5083 intel_dp->last_backlight_off = jiffies;
5084}
5085
67a54566
DV
5086static void
5087intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5088 struct intel_dp *intel_dp)
67a54566
DV
5089{
5090 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5091 struct edp_power_seq cur, vbt, spec,
5092 *final = &intel_dp->pps_delays;
67a54566 5093 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 5094 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5095
e39b999a
VS
5096 lockdep_assert_held(&dev_priv->pps_mutex);
5097
81ddbc69
VS
5098 /* already initialized? */
5099 if (final->t11_t12 != 0)
5100 return;
5101
453c5420 5102 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5103 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5104 pp_on_reg = PCH_PP_ON_DELAYS;
5105 pp_off_reg = PCH_PP_OFF_DELAYS;
5106 pp_div_reg = PCH_PP_DIVISOR;
5107 } else {
bf13e81b
JN
5108 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5109
5110 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5111 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5112 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5113 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5114 }
67a54566
DV
5115
5116 /* Workaround: Need to write PP_CONTROL with the unlock key as
5117 * the very first thing. */
453c5420 5118 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 5119 I915_WRITE(pp_ctrl_reg, pp);
67a54566 5120
453c5420
JB
5121 pp_on = I915_READ(pp_on_reg);
5122 pp_off = I915_READ(pp_off_reg);
5123 pp_div = I915_READ(pp_div_reg);
67a54566
DV
5124
5125 /* Pull timing values out of registers */
5126 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5127 PANEL_POWER_UP_DELAY_SHIFT;
5128
5129 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5130 PANEL_LIGHT_ON_DELAY_SHIFT;
5131
5132 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5133 PANEL_LIGHT_OFF_DELAY_SHIFT;
5134
5135 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5136 PANEL_POWER_DOWN_DELAY_SHIFT;
5137
5138 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5139 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5140
5141 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5142 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5143
41aa3448 5144 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5145
5146 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5147 * our hw here, which are all in 100usec. */
5148 spec.t1_t3 = 210 * 10;
5149 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5150 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5151 spec.t10 = 500 * 10;
5152 /* This one is special and actually in units of 100ms, but zero
5153 * based in the hw (so we need to add 100 ms). But the sw vbt
5154	 * table multiplies it by 1000 to make it in units of 100usec,
5155 * too. */
5156 spec.t11_t12 = (510 + 100) * 10;
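	/*
	 * Worked example of the unit handling above (informational only):
	 * the eDP spec upper bound for T11+T12 is 510 ms; the hw field is
	 * zero based in 100 ms steps, so add 100 ms and express it in the
	 * 100 us units used here: (510 + 100) ms = 610 ms = 6100 * 100 us.
	 */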
5157
5158 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5159 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5160
5161 /* Use the max of the register settings and vbt. If both are
5162 * unset, fall back to the spec limits. */
36b5f425 5163#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5164 spec.field : \
5165 max(cur.field, vbt.field))
5166 assign_final(t1_t3);
5167 assign_final(t8);
5168 assign_final(t9);
5169 assign_final(t10);
5170 assign_final(t11_t12);
5171#undef assign_final
5172
36b5f425 5173#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5174 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5175 intel_dp->backlight_on_delay = get_delay(t8);
5176 intel_dp->backlight_off_delay = get_delay(t9);
5177 intel_dp->panel_power_down_delay = get_delay(t10);
5178 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5179#undef get_delay
5180
f30d26e4
JN
5181 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5182 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5183 intel_dp->panel_power_cycle_delay);
5184
5185 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5186 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5187}
5188
5189static void
5190intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5191 struct intel_dp *intel_dp)
f30d26e4
JN
5192{
5193 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5194 u32 pp_on, pp_off, pp_div, port_sel = 0;
5195 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5196 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 5197 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5198 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5199
e39b999a 5200 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
5201
5202 if (HAS_PCH_SPLIT(dev)) {
5203 pp_on_reg = PCH_PP_ON_DELAYS;
5204 pp_off_reg = PCH_PP_OFF_DELAYS;
5205 pp_div_reg = PCH_PP_DIVISOR;
5206 } else {
bf13e81b
JN
5207 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5208
5209 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5210 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5211 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5212 }
5213
b2f19d1a
PZ
5214 /*
5215 * And finally store the new values in the power sequencer. The
5216 * backlight delays are set to 1 because we do manual waits on them. For
5217 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5218 * we'll end up waiting for the backlight off delay twice: once when we
5219 * do the manual sleep, and once when we disable the panel and wait for
5220 * the PP_STATUS bit to become zero.
5221 */
f30d26e4 5222 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5223 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5224 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5225 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5226	/* Compute the divisor for the pp clock; simply match the Bspec
5227 * formula. */
453c5420 5228 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 5229 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
5230 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5231
5232 /* Haswell doesn't have any port selection bits for the panel
5233 * power sequencer any more. */
bc7d38a4 5234 if (IS_VALLEYVIEW(dev)) {
ad933b56 5235 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5236 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5237 if (port == PORT_A)
a24c144c 5238 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5239 else
a24c144c 5240 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5241 }
5242
453c5420
JB
5243 pp_on |= port_sel;
5244
5245 I915_WRITE(pp_on_reg, pp_on);
5246 I915_WRITE(pp_off_reg, pp_off);
5247 I915_WRITE(pp_div_reg, pp_div);
67a54566 5248
67a54566 5249 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5250 I915_READ(pp_on_reg),
5251 I915_READ(pp_off_reg),
5252 I915_READ(pp_div_reg));
f684960e
CW
5253}
5254
b33a2815
VK
5255/**
5256 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5257 * @dev: DRM device
5258 * @refresh_rate: RR to be programmed
5259 *
5260 * This function gets called when refresh rate (RR) has to be changed from
5261 * one frequency to another. Switches can be between high and low RR
5262 * supported by the panel or to any other RR based on media playback (in
5263 * this case, RR value needs to be passed from user space).
5264 *
5265 * The caller of this function needs to take a lock on dev_priv->drrs.
5266 */
96178eeb 5267static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5268{
5269 struct drm_i915_private *dev_priv = dev->dev_private;
5270 struct intel_encoder *encoder;
96178eeb
VK
5271 struct intel_digital_port *dig_port = NULL;
5272 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5273 struct intel_crtc_state *config = NULL;
439d7ac0 5274 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5275 u32 reg, val;
96178eeb 5276 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5277
5278 if (refresh_rate <= 0) {
5279 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5280 return;
5281 }
5282
96178eeb
VK
5283 if (intel_dp == NULL) {
5284 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5285 return;
5286 }
5287
1fcc9d1c 5288 /*
e4d59f6b
RV
5289 * FIXME: This needs proper synchronization with psr state for some
5290 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5291 */
439d7ac0 5292
96178eeb
VK
5293 dig_port = dp_to_dig_port(intel_dp);
5294 encoder = &dig_port->base;
723f9aab 5295 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5296
5297 if (!intel_crtc) {
5298 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5299 return;
5300 }
5301
6e3c9717 5302 config = intel_crtc->config;
439d7ac0 5303
96178eeb 5304 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5305 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5306 return;
5307 }
5308
96178eeb
VK
5309 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5310 refresh_rate)
439d7ac0
PB
5311 index = DRRS_LOW_RR;
5312
96178eeb 5313 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5314 DRM_DEBUG_KMS(
5315 "DRRS requested for previously set RR...ignoring\n");
5316 return;
5317 }
5318
5319 if (!intel_crtc->active) {
5320 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5321 return;
5322 }
5323
44395bfe 5324 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5325 switch (index) {
5326 case DRRS_HIGH_RR:
5327 intel_dp_set_m_n(intel_crtc, M1_N1);
5328 break;
5329 case DRRS_LOW_RR:
5330 intel_dp_set_m_n(intel_crtc, M2_N2);
5331 break;
5332 case DRRS_MAX_RR:
5333 default:
5334 DRM_ERROR("Unsupported refreshrate type\n");
5335 }
5336 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5337 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5338 val = I915_READ(reg);
a4c30b1d 5339
439d7ac0 5340 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5341 if (IS_VALLEYVIEW(dev))
5342 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5343 else
5344 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5345 } else {
6fa7aec1
VK
5346 if (IS_VALLEYVIEW(dev))
5347 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5348 else
5349 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5350 }
5351 I915_WRITE(reg, val);
5352 }
5353
4e9ac947
VK
5354 dev_priv->drrs.refresh_rate_type = index;
5355
5356 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5357}
5358
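/*
 * Illustrative sketch (assumption, not driver code): how a caller would use
 * intel_dp_set_drrs_state() to drop to the panel's downclocked refresh rate.
 * Per the kernel-doc above, dev_priv->drrs.mutex must be held around the
 * call; this mirrors what intel_edp_drrs_downclock_work() below does.
 */
static void example_drop_to_low_rr(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drrs.mutex);

	if (dev_priv->drrs.dp)
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->panel.
					downclock_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
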
b33a2815
VK
5359/**
5360 * intel_edp_drrs_enable - init drrs struct if supported
5361 * @intel_dp: DP struct
5362 *
5363 * Initializes frontbuffer_bits and drrs.dp
5364 */
c395578e
VK
5365void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5366{
5367 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5368 struct drm_i915_private *dev_priv = dev->dev_private;
5369 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5370 struct drm_crtc *crtc = dig_port->base.base.crtc;
5371 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5372
5373 if (!intel_crtc->config->has_drrs) {
5374 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5375 return;
5376 }
5377
5378 mutex_lock(&dev_priv->drrs.mutex);
5379 if (WARN_ON(dev_priv->drrs.dp)) {
5380 DRM_ERROR("DRRS already enabled\n");
5381 goto unlock;
5382 }
5383
5384 dev_priv->drrs.busy_frontbuffer_bits = 0;
5385
5386 dev_priv->drrs.dp = intel_dp;
5387
5388unlock:
5389 mutex_unlock(&dev_priv->drrs.mutex);
5390}
5391
b33a2815
VK
5392/**
5393 * intel_edp_drrs_disable - Disable DRRS
5394 * @intel_dp: DP struct
5395 *
5396 */
c395578e
VK
5397void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5398{
5399 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5400 struct drm_i915_private *dev_priv = dev->dev_private;
5401 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5402 struct drm_crtc *crtc = dig_port->base.base.crtc;
5403 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5404
5405 if (!intel_crtc->config->has_drrs)
5406 return;
5407
5408 mutex_lock(&dev_priv->drrs.mutex);
5409 if (!dev_priv->drrs.dp) {
5410 mutex_unlock(&dev_priv->drrs.mutex);
5411 return;
5412 }
5413
5414 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5415 intel_dp_set_drrs_state(dev_priv->dev,
5416 intel_dp->attached_connector->panel.
5417 fixed_mode->vrefresh);
5418
5419 dev_priv->drrs.dp = NULL;
5420 mutex_unlock(&dev_priv->drrs.mutex);
5421
5422 cancel_delayed_work_sync(&dev_priv->drrs.work);
5423}
5424
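/*
 * Illustrative sketch (assumption, not driver code): intel_edp_drrs_enable()
 * and intel_edp_drrs_disable() above are meant to bracket the encoder
 * enable/disable paths once intel_crtc->config->has_drrs is known; the
 * example_* helpers below merely stand in for those paths.
 */
static void example_edp_enable_path(struct intel_dp *intel_dp)
{
	/* ...pipe and panel are up at this point... */
	intel_edp_drrs_enable(intel_dp);
}

static void example_edp_disable_path(struct intel_dp *intel_dp)
{
	intel_edp_drrs_disable(intel_dp);
	/* ...pipe and panel teardown follows... */
}
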
4e9ac947
VK
5425static void intel_edp_drrs_downclock_work(struct work_struct *work)
5426{
5427 struct drm_i915_private *dev_priv =
5428 container_of(work, typeof(*dev_priv), drrs.work.work);
5429 struct intel_dp *intel_dp;
5430
5431 mutex_lock(&dev_priv->drrs.mutex);
5432
5433 intel_dp = dev_priv->drrs.dp;
5434
5435 if (!intel_dp)
5436 goto unlock;
5437
439d7ac0 5438 /*
4e9ac947
VK
5439 * The delayed work can race with an invalidate hence we need to
5440 * recheck.
439d7ac0
PB
5441 */
5442
4e9ac947
VK
5443 if (dev_priv->drrs.busy_frontbuffer_bits)
5444 goto unlock;
439d7ac0 5445
4e9ac947
VK
5446 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5447 intel_dp_set_drrs_state(dev_priv->dev,
5448 intel_dp->attached_connector->panel.
5449 downclock_mode->vrefresh);
439d7ac0 5450
4e9ac947 5451unlock:
4e9ac947 5452 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5453}
5454
b33a2815
VK
5455/**
5456 * intel_edp_drrs_invalidate - Invalidate DRRS
5457 * @dev: DRM device
5458 * @frontbuffer_bits: frontbuffer plane tracking bits
5459 *
5460 * When there is a disturbance on screen (due to cursor movement/time
5461 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5462 * high RR.
5463 *
5464 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5465 */
a93fad0f
VK
5466void intel_edp_drrs_invalidate(struct drm_device *dev,
5467 unsigned frontbuffer_bits)
5468{
5469 struct drm_i915_private *dev_priv = dev->dev_private;
5470 struct drm_crtc *crtc;
5471 enum pipe pipe;
5472
9da7d693 5473 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5474 return;
5475
88f933a8 5476 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5477
a93fad0f 5478 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5479 if (!dev_priv->drrs.dp) {
5480 mutex_unlock(&dev_priv->drrs.mutex);
5481 return;
5482 }
5483
a93fad0f
VK
5484 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5485 pipe = to_intel_crtc(crtc)->pipe;
5486
5487 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5488 intel_dp_set_drrs_state(dev_priv->dev,
5489 dev_priv->drrs.dp->attached_connector->panel.
5490 fixed_mode->vrefresh);
5491 }
5492
5493 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5494
5495 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5496 mutex_unlock(&dev_priv->drrs.mutex);
5497}
5498
b33a2815
VK
5499/**
5500 * intel_edp_drrs_flush - Flush DRRS
5501 * @dev: DRM device
5502 * @frontbuffer_bits: frontbuffer plane tracking bits
5503 *
5504 * When there is no movement on screen, DRRS work can be scheduled.
5505 * This DRRS work is responsible for setting relevant registers after a
5506 * timeout of 1 second.
5507 *
5508 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5509 */
a93fad0f
VK
5510void intel_edp_drrs_flush(struct drm_device *dev,
5511 unsigned frontbuffer_bits)
5512{
5513 struct drm_i915_private *dev_priv = dev->dev_private;
5514 struct drm_crtc *crtc;
5515 enum pipe pipe;
5516
9da7d693 5517 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5518 return;
5519
88f933a8 5520 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5521
a93fad0f 5522 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5523 if (!dev_priv->drrs.dp) {
5524 mutex_unlock(&dev_priv->drrs.mutex);
5525 return;
5526 }
5527
a93fad0f
VK
5528 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5529 pipe = to_intel_crtc(crtc)->pipe;
5530 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5531
a93fad0f
VK
5532 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5533 !dev_priv->drrs.busy_frontbuffer_bits)
5534 schedule_delayed_work(&dev_priv->drrs.work,
5535 msecs_to_jiffies(1000));
5536 mutex_unlock(&dev_priv->drrs.mutex);
5537}
5538
b33a2815
VK
5539/**
5540 * DOC: Display Refresh Rate Switching (DRRS)
5541 *
5542 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5543 * which enables switching between low and high refresh rates
5544 * dynamically, based on the usage scenario. This feature is applicable
5545 * to internal panels.
5546 *
5547 * Indication that the panel supports DRRS is given by the panel EDID, which
5548 * would list multiple refresh rates for one resolution.
5549 *
5550 * DRRS is of two types - static and seamless.
5551 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5552 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5553 * Seamless DRRS involves changing RR without any visual effect to the user
5554 * and can be used during normal system usage. This is done by programming
5555 * certain registers.
5556 *
5557 * Support for static/seamless DRRS may be indicated in the VBT based on
5558 * inputs from the panel spec.
5559 *
5560 * DRRS saves power by switching to low RR based on usage scenarios.
5561 *
5562 * eDP DRRS:-
5563 * The implementation is based on frontbuffer tracking implementation.
5564 * When there is a disturbance on the screen triggered by user activity or a
5565 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5566 * When there is no movement on screen, after a timeout of 1 second, a switch
5567 * to low RR is made.
5568 * For integration with frontbuffer tracking code,
5569 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5570 *
5571 * DRRS can be further extended to support other internal panels and also
5572 * the scenario of video playback wherein RR is set based on the rate
5573 * requested by userspace.
5574 */
5575
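/*
 * Illustrative sketch (assumption, not driver code): how frontbuffer tracking
 * is expected to feed eDP DRRS per the DOC comment above. The example_* hooks
 * merely stand in for the real frontbuffer tracking call sites.
 */
static void example_frontbuffer_dirty(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* Screen content is about to change: switch back to the high RR. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

static void example_frontbuffer_flush_done(struct drm_device *dev,
					   unsigned frontbuffer_bits)
{
	/* No further updates pending: let the 1 s downclock work kick in. */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
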
5576/**
5577 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5578 * @intel_connector: eDP connector
5579 * @fixed_mode: preferred mode of panel
5580 *
5581 * This function is called only once at driver load to initialize basic
5582 * DRRS stuff.
5583 *
5584 * Returns:
5585 * Downclock mode if panel supports it, else return NULL.
5586 * DRRS support is determined by the presence of downclock mode (apart
5587 * from VBT setting).
5588 */
4f9db5b5 5589static struct drm_display_mode *
96178eeb
VK
5590intel_dp_drrs_init(struct intel_connector *intel_connector,
5591 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5592{
5593 struct drm_connector *connector = &intel_connector->base;
96178eeb 5594 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5595 struct drm_i915_private *dev_priv = dev->dev_private;
5596 struct drm_display_mode *downclock_mode = NULL;
5597
9da7d693
DV
5598 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5599 mutex_init(&dev_priv->drrs.mutex);
5600
4f9db5b5
PB
5601 if (INTEL_INFO(dev)->gen <= 6) {
5602		DRM_DEBUG_KMS("DRRS is supported only on Gen7 and above\n");
5603 return NULL;
5604 }
5605
5606 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5607 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5608 return NULL;
5609 }
5610
5611 downclock_mode = intel_find_panel_downclock
5612 (dev, fixed_mode, connector);
5613
5614 if (!downclock_mode) {
a1d26342 5615 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5616 return NULL;
5617 }
5618
96178eeb 5619 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5620
96178eeb 5621 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5622 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5623 return downclock_mode;
5624}
5625
ed92f0b2 5626static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5627 struct intel_connector *intel_connector)
ed92f0b2
PZ
5628{
5629 struct drm_connector *connector = &intel_connector->base;
5630 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5631 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5632 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5633 struct drm_i915_private *dev_priv = dev->dev_private;
5634 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5635 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5636 bool has_dpcd;
5637 struct drm_display_mode *scan;
5638 struct edid *edid;
6517d273 5639 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5640
5641 if (!is_edp(intel_dp))
5642 return true;
5643
49e6bc51
VS
5644 pps_lock(intel_dp);
5645 intel_edp_panel_vdd_sanitize(intel_dp);
5646 pps_unlock(intel_dp);
63635217 5647
ed92f0b2 5648 /* Cache DPCD and EDID for edp. */
ed92f0b2 5649 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5650
5651 if (has_dpcd) {
5652 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5653 dev_priv->no_aux_handshake =
5654 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5655 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5656 } else {
5657 /* if this fails, presume the device is a ghost */
5658 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5659 return false;
5660 }
5661
5662 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5663 pps_lock(intel_dp);
36b5f425 5664 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5665 pps_unlock(intel_dp);
ed92f0b2 5666
060c8778 5667 mutex_lock(&dev->mode_config.mutex);
0b99836f 5668 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5669 if (edid) {
5670 if (drm_add_edid_modes(connector, edid)) {
5671 drm_mode_connector_update_edid_property(connector,
5672 edid);
5673 drm_edid_to_eld(connector, edid);
5674 } else {
5675 kfree(edid);
5676 edid = ERR_PTR(-EINVAL);
5677 }
5678 } else {
5679 edid = ERR_PTR(-ENOENT);
5680 }
5681 intel_connector->edid = edid;
5682
5683 /* prefer fixed mode from EDID if available */
5684 list_for_each_entry(scan, &connector->probed_modes, head) {
5685 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5686 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5687 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5688 intel_connector, fixed_mode);
ed92f0b2
PZ
5689 break;
5690 }
5691 }
5692
5693 /* fallback to VBT if available for eDP */
5694 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5695 fixed_mode = drm_mode_duplicate(dev,
5696 dev_priv->vbt.lfp_lvds_vbt_mode);
5697 if (fixed_mode)
5698 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5699 }
060c8778 5700 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5701
01527b31
CT
5702 if (IS_VALLEYVIEW(dev)) {
5703 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5704 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5705
5706 /*
5707 * Figure out the current pipe for the initial backlight setup.
5708 * If the current pipe isn't valid, try the PPS pipe, and if that
5709 * fails just assume pipe A.
5710 */
5711 if (IS_CHERRYVIEW(dev))
5712 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5713 else
5714 pipe = PORT_TO_PIPE(intel_dp->DP);
5715
5716 if (pipe != PIPE_A && pipe != PIPE_B)
5717 pipe = intel_dp->pps_pipe;
5718
5719 if (pipe != PIPE_A && pipe != PIPE_B)
5720 pipe = PIPE_A;
5721
5722 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5723 pipe_name(pipe));
01527b31
CT
5724 }
5725
4f9db5b5 5726 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5727 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5728 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5729
5730 return true;
5731}
5732
16c25533 5733bool
f0fec3f2
PZ
5734intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5735 struct intel_connector *intel_connector)
a4fc5ed6 5736{
f0fec3f2
PZ
5737 struct drm_connector *connector = &intel_connector->base;
5738 struct intel_dp *intel_dp = &intel_dig_port->dp;
5739 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5740 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5741 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5742 enum port port = intel_dig_port->port;
0b99836f 5743 int type;
a4fc5ed6 5744
a4a5d2f8
VS
5745 intel_dp->pps_pipe = INVALID_PIPE;
5746
ec5b01dd 5747 /* intel_dp vfuncs */
b6b5e383
DL
5748 if (INTEL_INFO(dev)->gen >= 9)
5749 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5750 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5751 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5752 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5753 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5754 else if (HAS_PCH_SPLIT(dev))
5755 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5756 else
5757 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5758
b9ca5fad
DL
5759 if (INTEL_INFO(dev)->gen >= 9)
5760 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5761 else
5762 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5763
0767935e
DV
5764 /* Preserve the current hw state. */
5765 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5766 intel_dp->attached_connector = intel_connector;
3d3dc149 5767
3b32a35b 5768 if (intel_dp_is_edp(dev, port))
b329530c 5769 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5770 else
5771 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5772
f7d24902
ID
5773 /*
5774 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5775 * for DP the encoder type can be set by the caller to
5776 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5777 */
5778 if (type == DRM_MODE_CONNECTOR_eDP)
5779 intel_encoder->type = INTEL_OUTPUT_EDP;
5780
c17ed5b5
VS
5781 /* eDP only on port B and/or C on vlv/chv */
5782 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5783 port != PORT_B && port != PORT_C))
5784 return false;
5785
e7281eab
ID
5786 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5787 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5788 port_name(port));
5789
b329530c 5790 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5791 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5792
a4fc5ed6
KP
5793 connector->interlace_allowed = true;
5794 connector->doublescan_allowed = 0;
5795
f0fec3f2 5796 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5797 edp_panel_vdd_work);
a4fc5ed6 5798
df0e9248 5799 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5800 drm_connector_register(connector);
a4fc5ed6 5801
affa9354 5802 if (HAS_DDI(dev))
bcbc889b
PZ
5803 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5804 else
5805 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5806 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5807
0b99836f 5808 /* Set up the hotplug pin. */
ab9d7c30
PZ
5809 switch (port) {
5810 case PORT_A:
1d843f9d 5811 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5812 break;
5813 case PORT_B:
1d843f9d 5814 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5815 break;
5816 case PORT_C:
1d843f9d 5817 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5818 break;
5819 case PORT_D:
1d843f9d 5820 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5821 break;
5822 default:
ad1c0b19 5823 BUG();
5eb08b69
ZW
5824 }
5825
dada1a9f 5826 if (is_edp(intel_dp)) {
773538e8 5827 pps_lock(intel_dp);
1e74a324
VS
5828 intel_dp_init_panel_power_timestamps(intel_dp);
5829 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5830 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5831 else
36b5f425 5832 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5833 pps_unlock(intel_dp);
dada1a9f 5834 }
0095e6dc 5835
9d1a1031 5836 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5837
0e32b39c 5838 /* init MST on ports that can support it */
0c9b3715
JN
5839 if (HAS_DP_MST(dev) &&
5840 (port == PORT_B || port == PORT_C || port == PORT_D))
5841 intel_dp_mst_encoder_init(intel_dig_port,
5842 intel_connector->base.base.id);
0e32b39c 5843
36b5f425 5844 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5845 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5846 if (is_edp(intel_dp)) {
5847 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5848 /*
5849		 * vdd might still be enabled due to the delayed vdd off.
5850 * Make sure vdd is actually turned off here.
5851 */
773538e8 5852 pps_lock(intel_dp);
4be73780 5853 edp_panel_vdd_off_sync(intel_dp);
773538e8 5854 pps_unlock(intel_dp);
15b1d171 5855 }
34ea3d38 5856 drm_connector_unregister(connector);
b2f246a8 5857 drm_connector_cleanup(connector);
16c25533 5858 return false;
b2f246a8 5859 }
32f9d658 5860
f684960e
CW
5861 intel_dp_add_properties(intel_dp, connector);
5862
a4fc5ed6
KP
5863 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5864 * 0xd. Failure to do so will result in spurious interrupts being
5865 * generated on the port when a cable is not attached.
5866 */
5867 if (IS_G4X(dev) && !IS_GM45(dev)) {
5868 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5869 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5870 }
16c25533 5871
aa7471d2
JN
5872 i915_debugfs_connector_add(connector);
5873
16c25533 5874 return true;
a4fc5ed6 5875}
f0fec3f2
PZ
5876
5877void
5878intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5879{
13cf5504 5880 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5881 struct intel_digital_port *intel_dig_port;
5882 struct intel_encoder *intel_encoder;
5883 struct drm_encoder *encoder;
5884 struct intel_connector *intel_connector;
5885
b14c5679 5886 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5887 if (!intel_dig_port)
5888 return;
5889
08d9bc92 5890 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5891 if (!intel_connector) {
5892 kfree(intel_dig_port);
5893 return;
5894 }
5895
5896 intel_encoder = &intel_dig_port->base;
5897 encoder = &intel_encoder->base;
5898
5899 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5900 DRM_MODE_ENCODER_TMDS);
5901
5bfe2ac0 5902 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5903 intel_encoder->disable = intel_disable_dp;
00c09d70 5904 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5905 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5906 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5907 if (IS_CHERRYVIEW(dev)) {
9197c88b 5908 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5909 intel_encoder->pre_enable = chv_pre_enable_dp;
5910 intel_encoder->enable = vlv_enable_dp;
580d3811 5911 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5912 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5913 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5914 intel_encoder->pre_enable = vlv_pre_enable_dp;
5915 intel_encoder->enable = vlv_enable_dp;
49277c31 5916 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5917 } else {
ecff4f3b
JN
5918 intel_encoder->pre_enable = g4x_pre_enable_dp;
5919 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5920 if (INTEL_INFO(dev)->gen >= 5)
5921 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5922 }
f0fec3f2 5923
174edf1f 5924 intel_dig_port->port = port;
f0fec3f2
PZ
5925 intel_dig_port->dp.output_reg = output_reg;
5926
00c09d70 5927 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5928 if (IS_CHERRYVIEW(dev)) {
5929 if (port == PORT_D)
5930 intel_encoder->crtc_mask = 1 << 2;
5931 else
5932 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5933 } else {
5934 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5935 }
bc079e8b 5936 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5937 intel_encoder->hot_plug = intel_dp_hot_plug;
5938
13cf5504
DA
5939 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5940 dev_priv->hpd_irq_port[port] = intel_dig_port;
5941
15b1d171
PZ
5942 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5943 drm_encoder_cleanup(encoder);
5944 kfree(intel_dig_port);
b2f246a8 5945 kfree(intel_connector);
15b1d171 5946 }
f0fec3f2 5947}
0e32b39c
DA
5948
5949void intel_dp_mst_suspend(struct drm_device *dev)
5950{
5951 struct drm_i915_private *dev_priv = dev->dev_private;
5952 int i;
5953
5954 /* disable MST */
5955 for (i = 0; i < I915_MAX_PORTS; i++) {
5956 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5957 if (!intel_dig_port)
5958 continue;
5959
5960 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5961 if (!intel_dig_port->dp.can_mst)
5962 continue;
5963 if (intel_dig_port->dp.is_mst)
5964 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5965 }
5966 }
5967}
5968
5969void intel_dp_mst_resume(struct drm_device *dev)
5970{
5971 struct drm_i915_private *dev_priv = dev->dev_private;
5972 int i;
5973
5974 for (i = 0; i < I915_MAX_PORTS; i++) {
5975 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5976 if (!intel_dig_port)
5977 continue;
5978 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5979 int ret;
5980
5981 if (!intel_dig_port->dp.can_mst)
5982 continue;
5983
5984 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5985 if (ret != 0) {
5986 intel_dp_check_mst_status(&intel_dig_port->dp);
5987 }
5988 }
5989 }
5990}