drivers/gpu/drm/i915/intel_dp.c
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf
CML
50struct dp_link_dpll {
51 int link_bw;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { DP_LINK_BW_1_62,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { DP_LINK_BW_2_7,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { DP_LINK_BW_1_62,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { DP_LINK_BW_2_7,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5
CML
69static const struct dp_link_dpll vlv_dpll[] = {
70 { DP_LINK_BW_1_62,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
72 { DP_LINK_BW_2_7,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4, which allows additional link rates.
78 * The table below lists only the fixed rates; the variable rates are excluded.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming a fractional divider for m2.
83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
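/*
 * For illustration: the 1.62GHz entry below encodes m2_int = 32 and
 * m2_fraction = 1677722 as (32 << 22) | 1677722 == 0x819999a.
 */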
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63 93
64987fc5
SJ
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
637a9c63 96static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15
VS
97 324000, 432000, 540000 };
98static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
cfcb0fc9
JB
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
da63a9f2
PZ
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
68b4d824
ID
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
119}
120
df0e9248
CW
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
ed4e9c1d
VS
133static int
134intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 135{
7183dc29 136 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
137
138 switch (max_link_bw) {
139 case DP_LINK_BW_1_62:
140 case DP_LINK_BW_2_7:
1db10e28 141 case DP_LINK_BW_5_4:
d4eead50 142 break;
a4fc5ed6 143 default:
d4eead50
ID
144 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
145 max_link_bw);
a4fc5ed6
KP
146 max_link_bw = DP_LINK_BW_1_62;
147 break;
148 }
149 return max_link_bw;
150}
151
eeb6324d
PZ
152static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
153{
154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
155 struct drm_device *dev = intel_dig_port->base.base.dev;
156 u8 source_max, sink_max;
157
158 source_max = 4;
159 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
160 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
161 source_max = 2;
162
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165 return min(source_max, sink_max);
166}
167
cd9dde44
AJ
168/*
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
171 *
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 *
174 * 270000 * 1 * 8 / 10 == 216000
175 *
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
180 *
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
183 */
184
a4fc5ed6 185static int
c898261c 186intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 187{
cd9dde44 188 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
189}
190
fe27d53e
DA
191static int
192intel_dp_max_data_rate(int max_link_clock, int max_lanes)
193{
194 return (max_link_clock * max_lanes * 8) / 10;
195}
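/*
 * Worked example combining the two helpers (numbers from the comment
 * above): a 1680x1050 reduced-blanking mode has ->clock == 119000, so at
 * 18bpp intel_dp_link_required() yields 214200 decakilobits/s, which fits
 * in the 216000 that intel_dp_max_data_rate() reports for one 2.7GHz lane.
 */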
196
c19de8eb 197static enum drm_mode_status
a4fc5ed6
KP
198intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
200{
df0e9248 201 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 206
dd06f90e
JN
207 if (is_edp(intel_dp) && fixed_mode) {
208 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
209 return MODE_PANEL;
210
dd06f90e 211 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 212 return MODE_PANEL;
03afc4a2
DV
213
214 target_clock = fixed_mode->clock;
7de56f43
ZY
215 }
216
50fec21a 217 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 218 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
219
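/* 18bpp (6 bpc) is the lowest bpp the link computation will ever fall
 * back to, so validate the mode against that most forgiving case. */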
220 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
221 mode_rate = intel_dp_link_required(target_clock, 18);
222
223 if (mode_rate > max_rate)
c4867936 224 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
225
226 if (mode->clock < 10000)
227 return MODE_CLOCK_LOW;
228
0af78a2b
DV
229 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
230 return MODE_H_ILLEGAL;
231
a4fc5ed6
KP
232 return MODE_OK;
233}
234
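/* Pack up to 4 bytes MSB-first into a single 32-bit AUX data register,
 * e.g. {0x12, 0x34, 0x56, 0x78} becomes 0x12345678. */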
a4f1289e 235uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
236{
237 int i;
238 uint32_t v = 0;
239
240 if (src_bytes > 4)
241 src_bytes = 4;
242 for (i = 0; i < src_bytes; i++)
243 v |= ((uint32_t) src[i]) << ((3-i) * 8);
244 return v;
245}
246
c2af70e2 247static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
248{
249 int i;
250 if (dst_bytes > 4)
251 dst_bytes = 4;
252 for (i = 0; i < dst_bytes; i++)
253 dst[i] = src >> ((3-i) * 8);
254}
255
fb0f8fbf
KP
256/* hrawclock is 1/4 the FSB frequency */
257static int
258intel_hrawclk(struct drm_device *dev)
259{
260 struct drm_i915_private *dev_priv = dev->dev_private;
261 uint32_t clkcfg;
262
9473c8f4
VP
263 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
264 if (IS_VALLEYVIEW(dev))
265 return 200;
266
fb0f8fbf
KP
267 clkcfg = I915_READ(CLKCFG);
268 switch (clkcfg & CLKCFG_FSB_MASK) {
269 case CLKCFG_FSB_400:
270 return 100;
271 case CLKCFG_FSB_533:
272 return 133;
273 case CLKCFG_FSB_667:
274 return 166;
275 case CLKCFG_FSB_800:
276 return 200;
277 case CLKCFG_FSB_1067:
278 return 266;
279 case CLKCFG_FSB_1333:
280 return 333;
281 /* these two are just a guess; one of them might be right */
282 case CLKCFG_FSB_1600:
283 case CLKCFG_FSB_1600_ALT:
284 return 400;
285 default:
286 return 133;
287 }
288}
289
bf13e81b
JN
290static void
291intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 292 struct intel_dp *intel_dp);
bf13e81b
JN
293static void
294intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 295 struct intel_dp *intel_dp);
bf13e81b 296
773538e8
VS
297static void pps_lock(struct intel_dp *intel_dp)
298{
299 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
300 struct intel_encoder *encoder = &intel_dig_port->base;
301 struct drm_device *dev = encoder->base.dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 enum intel_display_power_domain power_domain;
304
305 /*
306 * See vlv_power_sequencer_reset() why we need
307 * a power domain reference here.
308 */
309 power_domain = intel_display_port_power_domain(encoder);
310 intel_display_power_get(dev_priv, power_domain);
311
312 mutex_lock(&dev_priv->pps_mutex);
313}
314
315static void pps_unlock(struct intel_dp *intel_dp)
316{
317 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
318 struct intel_encoder *encoder = &intel_dig_port->base;
319 struct drm_device *dev = encoder->base.dev;
320 struct drm_i915_private *dev_priv = dev->dev_private;
321 enum intel_display_power_domain power_domain;
322
323 mutex_unlock(&dev_priv->pps_mutex);
324
325 power_domain = intel_display_port_power_domain(encoder);
326 intel_display_power_put(dev_priv, power_domain);
327}
328
961a0db0
VS
329static void
330vlv_power_sequencer_kick(struct intel_dp *intel_dp)
331{
332 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
333 struct drm_device *dev = intel_dig_port->base.base.dev;
334 struct drm_i915_private *dev_priv = dev->dev_private;
335 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 336 bool pll_enabled;
961a0db0
VS
337 uint32_t DP;
338
339 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
340 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
341 pipe_name(pipe), port_name(intel_dig_port->port)))
342 return;
343
344 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
345 pipe_name(pipe), port_name(intel_dig_port->port));
346
347 /* Preserve the BIOS-computed detected bit. This is
348 * supposed to be read-only.
349 */
350 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
351 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
352 DP |= DP_PORT_WIDTH(1);
353 DP |= DP_LINK_TRAIN_PAT_1;
354
355 if (IS_CHERRYVIEW(dev))
356 DP |= DP_PIPE_SELECT_CHV(pipe);
357 else if (pipe == PIPE_B)
358 DP |= DP_PIPEB_SELECT;
359
d288f65f
VS
360 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
361
362 /*
363 * The DPLL for the pipe must be enabled for this to work.
364 * So enable it temporarily if it's not already enabled.
365 */
366 if (!pll_enabled)
367 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
368 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
369
961a0db0
VS
370 /*
371 * Similar magic as in intel_dp_enable_port().
372 * We _must_ do this port enable + disable trick
373 * to make this power sequencer lock onto the port.
374 * Otherwise even the VDD force bit won't work.
375 */
376 I915_WRITE(intel_dp->output_reg, DP);
377 POSTING_READ(intel_dp->output_reg);
378
379 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
380 POSTING_READ(intel_dp->output_reg);
381
382 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
384
385 if (!pll_enabled)
386 vlv_force_pll_off(dev, pipe);
961a0db0
VS
387}
388
bf13e81b
JN
389static enum pipe
390vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
391{
392 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
393 struct drm_device *dev = intel_dig_port->base.base.dev;
394 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
395 struct intel_encoder *encoder;
396 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 397 enum pipe pipe;
bf13e81b 398
e39b999a 399 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 400
a8c3344e
VS
401 /* We should never land here with regular DP ports */
402 WARN_ON(!is_edp(intel_dp));
403
a4a5d2f8
VS
404 if (intel_dp->pps_pipe != INVALID_PIPE)
405 return intel_dp->pps_pipe;
406
407 /*
408 * We don't have power sequencer currently.
409 * Pick one that's not used by other ports.
410 */
411 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
412 base.head) {
413 struct intel_dp *tmp;
414
415 if (encoder->type != INTEL_OUTPUT_EDP)
416 continue;
417
418 tmp = enc_to_intel_dp(&encoder->base);
419
420 if (tmp->pps_pipe != INVALID_PIPE)
421 pipes &= ~(1 << tmp->pps_pipe);
422 }
423
424 /*
425 * Didn't find one. This should not happen since there
426 * are two power sequencers and up to two eDP ports.
427 */
428 if (WARN_ON(pipes == 0))
a8c3344e
VS
429 pipe = PIPE_A;
430 else
431 pipe = ffs(pipes) - 1;
a4a5d2f8 432
a8c3344e
VS
433 vlv_steal_power_sequencer(dev, pipe);
434 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
435
436 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
437 pipe_name(intel_dp->pps_pipe),
438 port_name(intel_dig_port->port));
439
440 /* init power sequencer on this pipe and port */
36b5f425
VS
441 intel_dp_init_panel_power_sequencer(dev, intel_dp);
442 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 443
961a0db0
VS
444 /*
445 * Even vdd force doesn't work until we've made
446 * the power sequencer lock in on the port.
447 */
448 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
449
450 return intel_dp->pps_pipe;
451}
452
6491ab27
VS
453typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
454 enum pipe pipe);
455
456static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
457 enum pipe pipe)
458{
459 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
460}
461
462static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
463 enum pipe pipe)
464{
465 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
466}
467
468static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
469 enum pipe pipe)
470{
471 return true;
472}
bf13e81b 473
a4a5d2f8 474static enum pipe
6491ab27
VS
475vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
476 enum port port,
477 vlv_pipe_check pipe_check)
a4a5d2f8
VS
478{
479 enum pipe pipe;
bf13e81b 480
bf13e81b
JN
481 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
482 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
483 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
484
485 if (port_sel != PANEL_PORT_SELECT_VLV(port))
486 continue;
487
6491ab27
VS
488 if (!pipe_check(dev_priv, pipe))
489 continue;
490
a4a5d2f8 491 return pipe;
bf13e81b
JN
492 }
493
a4a5d2f8
VS
494 return INVALID_PIPE;
495}
496
497static void
498vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
499{
500 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
501 struct drm_device *dev = intel_dig_port->base.base.dev;
502 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
503 enum port port = intel_dig_port->port;
504
505 lockdep_assert_held(&dev_priv->pps_mutex);
506
507 /* try to find a pipe with this port selected */
6491ab27
VS
508 /* first pick one where the panel is on */
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 vlv_pipe_has_pp_on);
511 /* didn't find one? pick one where vdd is on */
512 if (intel_dp->pps_pipe == INVALID_PIPE)
513 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
514 vlv_pipe_has_vdd_on);
515 /* didn't find one? pick one with just the correct port */
516 if (intel_dp->pps_pipe == INVALID_PIPE)
517 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
518 vlv_pipe_any);
a4a5d2f8
VS
519
520 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
521 if (intel_dp->pps_pipe == INVALID_PIPE) {
522 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
523 port_name(port));
524 return;
bf13e81b
JN
525 }
526
a4a5d2f8
VS
527 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
528 port_name(port), pipe_name(intel_dp->pps_pipe));
529
36b5f425
VS
530 intel_dp_init_panel_power_sequencer(dev, intel_dp);
531 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
532}
533
773538e8
VS
534void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
535{
536 struct drm_device *dev = dev_priv->dev;
537 struct intel_encoder *encoder;
538
539 if (WARN_ON(!IS_VALLEYVIEW(dev)))
540 return;
541
542 /*
543 * We can't grab pps_mutex here due to deadlock with power_domain
544 * mutex when power_domain functions are called while holding pps_mutex.
545 * That also means that in order to use pps_pipe the code needs to
546 * hold both a power domain reference and pps_mutex, and the power domain
547 * reference get/put must be done while _not_ holding pps_mutex.
548 * pps_{lock,unlock}() do these steps in the correct order, so they
549 * should always be used.
550 */
551
552 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
553 struct intel_dp *intel_dp;
554
555 if (encoder->type != INTEL_OUTPUT_EDP)
556 continue;
557
558 intel_dp = enc_to_intel_dp(&encoder->base);
559 intel_dp->pps_pipe = INVALID_PIPE;
560 }
bf13e81b
JN
561}
562
563static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
564{
565 struct drm_device *dev = intel_dp_to_dev(intel_dp);
566
b0a08bec
VK
567 if (IS_BROXTON(dev))
568 return BXT_PP_CONTROL(0);
569 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
570 return PCH_PP_CONTROL;
571 else
572 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
573}
574
575static u32 _pp_stat_reg(struct intel_dp *intel_dp)
576{
577 struct drm_device *dev = intel_dp_to_dev(intel_dp);
578
b0a08bec
VK
579 if (IS_BROXTON(dev))
580 return BXT_PP_STATUS(0);
581 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
582 return PCH_PP_STATUS;
583 else
584 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
585}
586
01527b31
CT
587/* Reboot notifier handler to shut down panel power, to guarantee the T12 timing.
588 This function is only applicable when the panel PM state is not tracked. */
589static int edp_notify_handler(struct notifier_block *this, unsigned long code,
590 void *unused)
591{
592 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
593 edp_notifier);
594 struct drm_device *dev = intel_dp_to_dev(intel_dp);
595 struct drm_i915_private *dev_priv = dev->dev_private;
596 u32 pp_div;
597 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
598
599 if (!is_edp(intel_dp) || code != SYS_RESTART)
600 return 0;
601
773538e8 602 pps_lock(intel_dp);
e39b999a 603
01527b31 604 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
605 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
606
01527b31
CT
607 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
608 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
609 pp_div = I915_READ(pp_div_reg);
610 pp_div &= PP_REFERENCE_DIVIDER_MASK;
611
612 /* 0x1F write to PP_DIV_REG sets max cycle delay */
613 I915_WRITE(pp_div_reg, pp_div | 0x1F);
614 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
615 msleep(intel_dp->panel_power_cycle_delay);
616 }
617
773538e8 618 pps_unlock(intel_dp);
e39b999a 619
01527b31
CT
620 return 0;
621}
622
4be73780 623static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 624{
30add22d 625 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
626 struct drm_i915_private *dev_priv = dev->dev_private;
627
e39b999a
VS
628 lockdep_assert_held(&dev_priv->pps_mutex);
629
9a42356b
VS
630 if (IS_VALLEYVIEW(dev) &&
631 intel_dp->pps_pipe == INVALID_PIPE)
632 return false;
633
bf13e81b 634 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
635}
636
4be73780 637static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 638{
30add22d 639 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
640 struct drm_i915_private *dev_priv = dev->dev_private;
641
e39b999a
VS
642 lockdep_assert_held(&dev_priv->pps_mutex);
643
9a42356b
VS
644 if (IS_VALLEYVIEW(dev) &&
645 intel_dp->pps_pipe == INVALID_PIPE)
646 return false;
647
773538e8 648 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
649}
650
9b984dae
KP
651static void
652intel_dp_check_edp(struct intel_dp *intel_dp)
653{
30add22d 654 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 655 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 656
9b984dae
KP
657 if (!is_edp(intel_dp))
658 return;
453c5420 659
4be73780 660 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
661 WARN(1, "eDP powered off while attempting aux channel communication.\n");
662 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
663 I915_READ(_pp_stat_reg(intel_dp)),
664 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
665 }
666}
667
9ee32fea
DV
668static uint32_t
669intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
670{
671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
672 struct drm_device *dev = intel_dig_port->base.base.dev;
673 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 674 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
675 uint32_t status;
676 bool done;
677
ef04f00d 678#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 679 if (has_aux_irq)
b18ac466 680 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 681 msecs_to_jiffies_timeout(10));
9ee32fea
DV
682 else
683 done = wait_for_atomic(C, 10) == 0;
684 if (!done)
685 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
686 has_aux_irq);
687#undef C
688
689 return status;
690}
691
ec5b01dd 692static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 693{
174edf1f
PZ
694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 696
ec5b01dd
DL
697 /*
698 * The clock divider is based on the hrawclk, and the AUX channel would
699 * like to run at 2MHz. So take the hrawclk value, divide by 2, and use that.
a4fc5ed6 700 */
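/* e.g. a 200MHz hrawclk gives a divider of 100, which yields the desired ~2MHz */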
ec5b01dd
DL
701 return index ? 0 : intel_hrawclk(dev) / 2;
702}
703
704static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705{
706 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
707 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 708 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
709
710 if (index)
711 return 0;
712
713 if (intel_dig_port->port == PORT_A) {
05024da3
VS
714 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
715
ec5b01dd
DL
716 } else {
717 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
718 }
719}
720
721static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
722{
723 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
724 struct drm_device *dev = intel_dig_port->base.base.dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726
727 if (intel_dig_port->port == PORT_A) {
728 if (index)
729 return 0;
05024da3 730 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
2c55c336
JN
731 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
732 /* Workaround for non-ULT HSW */
bc86625a
CW
733 switch (index) {
734 case 0: return 63;
735 case 1: return 72;
736 default: return 0;
737 }
ec5b01dd 738 } else {
bc86625a 739 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 740 }
b84a1cf8
RV
741}
742
ec5b01dd
DL
743static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
744{
745 return index ? 0 : 100;
746}
747
b6b5e383
DL
748static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
749{
750 /*
751 * SKL doesn't need us to program the AUX clock divider (Hardware will
752 * derive the clock from CDCLK automatically). We still implement the
753 * get_aux_clock_divider vfunc to plug into the existing code.
754 */
755 return index ? 0 : 1;
756}
757
5ed12a19
DL
758static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
759 bool has_aux_irq,
760 int send_bytes,
761 uint32_t aux_clock_divider)
762{
763 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
764 struct drm_device *dev = intel_dig_port->base.base.dev;
765 uint32_t precharge, timeout;
766
767 if (IS_GEN6(dev))
768 precharge = 3;
769 else
770 precharge = 5;
771
772 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
773 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
774 else
775 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
776
777 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 778 DP_AUX_CH_CTL_DONE |
5ed12a19 779 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 780 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 781 timeout |
788d4433 782 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
783 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 785 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
786}
787
b9ca5fad
DL
788static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
789 bool has_aux_irq,
790 int send_bytes,
791 uint32_t unused)
792{
793 return DP_AUX_CH_CTL_SEND_BUSY |
794 DP_AUX_CH_CTL_DONE |
795 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
796 DP_AUX_CH_CTL_TIME_OUT_ERROR |
797 DP_AUX_CH_CTL_TIME_OUT_1600us |
798 DP_AUX_CH_CTL_RECEIVE_ERROR |
799 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
800 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
801}
802
b84a1cf8
RV
803static int
804intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 805 const uint8_t *send, int send_bytes,
b84a1cf8
RV
806 uint8_t *recv, int recv_size)
807{
808 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
809 struct drm_device *dev = intel_dig_port->base.base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
812 uint32_t ch_data = ch_ctl + 4;
bc86625a 813 uint32_t aux_clock_divider;
b84a1cf8
RV
814 int i, ret, recv_bytes;
815 uint32_t status;
5ed12a19 816 int try, clock = 0;
4e6b788c 817 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
818 bool vdd;
819
773538e8 820 pps_lock(intel_dp);
e39b999a 821
72c3500a
VS
822 /*
823 * We will be called with VDD already enabled for dpcd/edid/oui reads.
824 * In such cases we want to leave VDD enabled and it's up to upper layers
825 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
826 * ourselves.
827 */
1e0560e0 828 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
829
830 /* dp aux is extremely sensitive to irq latency, hence request the
831 * lowest possible wakeup latency and so prevent the cpu from going into
832 * deep sleep states.
833 */
834 pm_qos_update_request(&dev_priv->pm_qos, 0);
835
836 intel_dp_check_edp(intel_dp);
5eb08b69 837
c67a470b
PZ
838 intel_aux_display_runtime_get(dev_priv);
839
11bee43e
JB
840 /* Try to wait for any previous AUX channel activity */
841 for (try = 0; try < 3; try++) {
ef04f00d 842 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
843 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
844 break;
845 msleep(1);
846 }
847
848 if (try == 3) {
02196c77
MK
849 static u32 last_status = -1;
850 const u32 status = I915_READ(ch_ctl);
851
852 if (status != last_status) {
853 WARN(1, "dp_aux_ch not started status 0x%08x\n",
854 status);
855 last_status = status;
856 }
857
9ee32fea
DV
858 ret = -EBUSY;
859 goto out;
4f7f7b7e
CW
860 }
861
46a5ae9f
PZ
862 /* Only 5 data registers! */
863 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
864 ret = -E2BIG;
865 goto out;
866 }
867
ec5b01dd 868 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
869 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
870 has_aux_irq,
871 send_bytes,
872 aux_clock_divider);
5ed12a19 873
bc86625a
CW
874 /* Must try at least 3 times according to DP spec */
875 for (try = 0; try < 5; try++) {
876 /* Load the send data into the aux channel data registers */
877 for (i = 0; i < send_bytes; i += 4)
878 I915_WRITE(ch_data + i,
a4f1289e
RV
879 intel_dp_pack_aux(send + i,
880 send_bytes - i));
bc86625a
CW
881
882 /* Send the command and wait for it to complete */
5ed12a19 883 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
884
885 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
886
887 /* Clear done status and any errors */
888 I915_WRITE(ch_ctl,
889 status |
890 DP_AUX_CH_CTL_DONE |
891 DP_AUX_CH_CTL_TIME_OUT_ERROR |
892 DP_AUX_CH_CTL_RECEIVE_ERROR);
893
74ebf294 894 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 895 continue;
74ebf294
TP
896
897 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
898 * 400us delay required for errors and timeouts
899 * Timeout errors from the HW already meet this
900 * requirement so skip to next iteration
901 */
902 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
903 usleep_range(400, 500);
bc86625a 904 continue;
74ebf294 905 }
bc86625a 906 if (status & DP_AUX_CH_CTL_DONE)
e058c945 907 goto done;
bc86625a 908 }
a4fc5ed6
KP
909 }
910
a4fc5ed6 911 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 912 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
913 ret = -EBUSY;
914 goto out;
a4fc5ed6
KP
915 }
916
e058c945 917done:
a4fc5ed6
KP
918 /* Check for timeout or receive error.
919 * Timeouts occur when the sink is not connected
920 */
a5b3da54 921 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 922 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
923 ret = -EIO;
924 goto out;
a5b3da54 925 }
1ae8c0a5
KP
926
927 /* Timeouts occur when the device isn't connected, so they're
928 * "normal" -- don't fill the kernel log with these */
a5b3da54 929 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 930 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
931 ret = -ETIMEDOUT;
932 goto out;
a4fc5ed6
KP
933 }
934
935 /* Unload any bytes sent back from the other side */
936 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
937 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
938 if (recv_bytes > recv_size)
939 recv_bytes = recv_size;
0206e353 940
4f7f7b7e 941 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
942 intel_dp_unpack_aux(I915_READ(ch_data + i),
943 recv + i, recv_bytes - i);
a4fc5ed6 944
9ee32fea
DV
945 ret = recv_bytes;
946out:
947 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 948 intel_aux_display_runtime_put(dev_priv);
9ee32fea 949
884f19e9
JN
950 if (vdd)
951 edp_panel_vdd_off(intel_dp, false);
952
773538e8 953 pps_unlock(intel_dp);
e39b999a 954
9ee32fea 955 return ret;
a4fc5ed6
KP
956}
957
a6c8aff0
JN
958#define BARE_ADDRESS_SIZE 3
959#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
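/* An AUX message header is 3 command/address bytes, plus one length byte
 * when the transaction carries a data payload. */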
9d1a1031
JN
960static ssize_t
961intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 962{
9d1a1031
JN
963 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
964 uint8_t txbuf[20], rxbuf[20];
965 size_t txsize, rxsize;
a4fc5ed6 966 int ret;
a4fc5ed6 967
d2d9cbbd
VS
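/* Build the 4-byte AUX header: request + address[19:16], address[15:8],
 * address[7:0], and the transfer length minus one. */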
968 txbuf[0] = (msg->request << 4) |
969 ((msg->address >> 16) & 0xf);
970 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
971 txbuf[2] = msg->address & 0xff;
972 txbuf[3] = msg->size - 1;
46a5ae9f 973
9d1a1031
JN
974 switch (msg->request & ~DP_AUX_I2C_MOT) {
975 case DP_AUX_NATIVE_WRITE:
976 case DP_AUX_I2C_WRITE:
a6c8aff0 977 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 978 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 979
9d1a1031
JN
980 if (WARN_ON(txsize > 20))
981 return -E2BIG;
a4fc5ed6 982
9d1a1031 983 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 984
9d1a1031
JN
985 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
986 if (ret > 0) {
987 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 988
a1ddefd8
JN
989 if (ret > 1) {
990 /* Number of bytes written in a short write. */
991 ret = clamp_t(int, rxbuf[1], 0, msg->size);
992 } else {
993 /* Return payload size. */
994 ret = msg->size;
995 }
9d1a1031
JN
996 }
997 break;
46a5ae9f 998
9d1a1031
JN
999 case DP_AUX_NATIVE_READ:
1000 case DP_AUX_I2C_READ:
a6c8aff0 1001 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 1002 rxsize = msg->size + 1;
a4fc5ed6 1003
9d1a1031
JN
1004 if (WARN_ON(rxsize > 20))
1005 return -E2BIG;
a4fc5ed6 1006
9d1a1031
JN
1007 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1008 if (ret > 0) {
1009 msg->reply = rxbuf[0] >> 4;
1010 /*
1011 * Assume happy day, and copy the data. The caller is
1012 * expected to check msg->reply before touching it.
1013 *
1014 * Return payload size.
1015 */
1016 ret--;
1017 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1018 }
9d1a1031
JN
1019 break;
1020
1021 default:
1022 ret = -EINVAL;
1023 break;
a4fc5ed6 1024 }
f51a44b9 1025
9d1a1031 1026 return ret;
a4fc5ed6
KP
1027}
1028
9d1a1031
JN
1029static void
1030intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1031{
1032 struct drm_device *dev = intel_dp_to_dev(intel_dp);
500ea70d 1033 struct drm_i915_private *dev_priv = dev->dev_private;
33ad6626
JN
1034 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1035 enum port port = intel_dig_port->port;
500ea70d 1036 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
0b99836f 1037 const char *name = NULL;
500ea70d 1038 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
ab2c0672
DA
1039 int ret;
1040
500ea70d
RV
1041 /* On SKL we don't have Aux for port E so we rely on VBT to set
1042 * a proper alternate aux channel.
1043 */
1044 if (IS_SKYLAKE(dev) && port == PORT_E) {
1045 switch (info->alternate_aux_channel) {
1046 case DP_AUX_B:
1047 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1048 break;
1049 case DP_AUX_C:
1050 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1051 break;
1052 case DP_AUX_D:
1053 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1054 break;
1055 case DP_AUX_A:
1056 default:
1057 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1058 }
1059 }
1060
33ad6626
JN
1061 switch (port) {
1062 case PORT_A:
1063 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1064 name = "DPDDC-A";
ab2c0672 1065 break;
33ad6626
JN
1066 case PORT_B:
1067 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1068 name = "DPDDC-B";
ab2c0672 1069 break;
33ad6626
JN
1070 case PORT_C:
1071 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1072 name = "DPDDC-C";
ab2c0672 1073 break;
33ad6626
JN
1074 case PORT_D:
1075 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1076 name = "DPDDC-D";
33ad6626 1077 break;
500ea70d
RV
1078 case PORT_E:
1079 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1080 name = "DPDDC-E";
1081 break;
33ad6626
JN
1082 default:
1083 BUG();
ab2c0672
DA
1084 }
1085
1b1aad75
DL
1086 /*
1087 * The AUX_CTL register is usually DP_CTL + 0x10.
1088 *
1089 * On Haswell and Broadwell though:
1090 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1091 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1092 *
1093 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1094 */
500ea70d 1095 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
33ad6626 1096 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1097
0b99836f 1098 intel_dp->aux.name = name;
9d1a1031
JN
1099 intel_dp->aux.dev = dev->dev;
1100 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1101
0b99836f
JN
1102 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1103 connector->base.kdev->kobj.name);
8316f337 1104
4f71d0cb 1105 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1106 if (ret < 0) {
4f71d0cb 1107 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1108 name, ret);
1109 return;
ab2c0672 1110 }
8a5e6aeb 1111
0b99836f
JN
1112 ret = sysfs_create_link(&connector->base.kdev->kobj,
1113 &intel_dp->aux.ddc.dev.kobj,
1114 intel_dp->aux.ddc.dev.kobj.name);
1115 if (ret < 0) {
1116 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1117 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1118 }
a4fc5ed6
KP
1119}
1120
80f65de3
ID
1121static void
1122intel_dp_connector_unregister(struct intel_connector *intel_connector)
1123{
1124 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1125
0e32b39c
DA
1126 if (!intel_connector->mst_port)
1127 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1128 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1129 intel_connector_unregister(intel_connector);
1130}
1131
5416d871 1132static void
c3346ef6 1133skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1134{
1135 u32 ctrl1;
1136
dd3cd74a
ACO
1137 memset(&pipe_config->dpll_hw_state, 0,
1138 sizeof(pipe_config->dpll_hw_state));
1139
5416d871
DL
1140 pipe_config->ddi_pll_sel = SKL_DPLL0;
1141 pipe_config->dpll_hw_state.cfgcr1 = 0;
1142 pipe_config->dpll_hw_state.cfgcr2 = 0;
1143
1144 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
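/* The PLL runs at half the link bit rate (data is clocked on both edges),
 * so the CTRL1 rate is selected from link_clock / 2. */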
c3346ef6
SJ
1145 switch (link_clock / 2) {
1146 case 81000:
71cd8423 1147 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1148 SKL_DPLL0);
1149 break;
c3346ef6 1150 case 135000:
71cd8423 1151 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1152 SKL_DPLL0);
1153 break;
c3346ef6 1154 case 270000:
71cd8423 1155 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1156 SKL_DPLL0);
1157 break;
c3346ef6 1158 case 162000:
71cd8423 1159 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1160 SKL_DPLL0);
1161 break;
1162 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1163 results in CDCLK change. Need to handle the change of CDCLK by
1164 disabling pipes and re-enabling them */
1165 case 108000:
71cd8423 1166 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1167 SKL_DPLL0);
1168 break;
1169 case 216000:
71cd8423 1170 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1171 SKL_DPLL0);
1172 break;
1173
5416d871
DL
1174 }
1175 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1176}
1177
0e50338c 1178static void
5cec258b 1179hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c 1180{
ee46f3c7
ACO
1181 memset(&pipe_config->dpll_hw_state, 0,
1182 sizeof(pipe_config->dpll_hw_state));
1183
0e50338c
DV
1184 switch (link_bw) {
1185 case DP_LINK_BW_1_62:
1186 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1187 break;
1188 case DP_LINK_BW_2_7:
1189 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1190 break;
1191 case DP_LINK_BW_5_4:
1192 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1193 break;
1194 }
1195}
1196
fc0f8e25 1197static int
12f6a2e2 1198intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1199{
94ca719e
VS
1200 if (intel_dp->num_sink_rates) {
1201 *sink_rates = intel_dp->sink_rates;
1202 return intel_dp->num_sink_rates;
fc0f8e25 1203 }
12f6a2e2
VS
1204
1205 *sink_rates = default_rates;
1206
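/* DP_LINK_BW_1_62/2_7/5_4 are 0x06/0x0a/0x14, so bw >> 3 gives indices
 * 0/1/2 into default_rates; +1 turns the index into a count. */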
1207 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1208}
1209
ed63baaf
TS
1210static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1211{
1212 /* WaDisableHBR2:skl */
1213 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1214 return false;
1215
1216 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1217 (INTEL_INFO(dev)->gen >= 9))
1218 return true;
1219 else
1220 return false;
1221}
1222
a8f3ef61 1223static int
1db10e28 1224intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1225{
64987fc5
SJ
1226 if (IS_BROXTON(dev)) {
1227 *source_rates = bxt_rates;
1228 return ARRAY_SIZE(bxt_rates);
1229 } else if (IS_SKYLAKE(dev)) {
637a9c63
SJ
1230 *source_rates = skl_rates;
1231 return ARRAY_SIZE(skl_rates);
a8f3ef61 1232 }
636280ba
VS
1233
1234 *source_rates = default_rates;
1235
ed63baaf
TS
1236 /* This depends on the fact that 5.4 is the last value in the array */
1237 if (intel_dp_source_supports_hbr2(dev))
1db10e28
VS
1238 return (DP_LINK_BW_5_4 >> 3) + 1;
1239 else
1240 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1241}
1242
c6bb3538
DV
1243static void
1244intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1245 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1246{
1247 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1248 const struct dp_link_dpll *divisor = NULL;
1249 int i, count = 0;
c6bb3538
DV
1250
1251 if (IS_G4X(dev)) {
9dd4ffdf
CML
1252 divisor = gen4_dpll;
1253 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1254 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1255 divisor = pch_dpll;
1256 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1257 } else if (IS_CHERRYVIEW(dev)) {
1258 divisor = chv_dpll;
1259 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1260 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1261 divisor = vlv_dpll;
1262 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1263 }
9dd4ffdf
CML
1264
1265 if (divisor && count) {
1266 for (i = 0; i < count; i++) {
1267 if (link_bw == divisor[i].link_bw) {
1268 pipe_config->dpll = divisor[i].dpll;
1269 pipe_config->clock_set = true;
1270 break;
1271 }
1272 }
c6bb3538
DV
1273 }
1274}
1275
2ecae76a
VS
1276static int intersect_rates(const int *source_rates, int source_len,
1277 const int *sink_rates, int sink_len,
94ca719e 1278 int *common_rates)
a8f3ef61
SJ
1279{
1280 int i = 0, j = 0, k = 0;
1281
a8f3ef61
SJ
1282 while (i < source_len && j < sink_len) {
1283 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1284 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1285 return k;
94ca719e 1286 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1287 ++k;
1288 ++i;
1289 ++j;
1290 } else if (source_rates[i] < sink_rates[j]) {
1291 ++i;
1292 } else {
1293 ++j;
1294 }
1295 }
1296 return k;
1297}
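/*
 * Both inputs are sorted ascending, so the merge above produces the sorted
 * intersection, e.g. {162000, 270000, 540000} vs. a sink's {162000, 270000}
 * yields {162000, 270000}.
 */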
1298
94ca719e
VS
1299static int intel_dp_common_rates(struct intel_dp *intel_dp,
1300 int *common_rates)
2ecae76a
VS
1301{
1302 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1303 const int *source_rates, *sink_rates;
1304 int source_len, sink_len;
1305
1306 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1307 source_len = intel_dp_source_rates(dev, &source_rates);
1308
1309 return intersect_rates(source_rates, source_len,
1310 sink_rates, sink_len,
94ca719e 1311 common_rates);
2ecae76a
VS
1312}
1313
0336400e
VS
1314static void snprintf_int_array(char *str, size_t len,
1315 const int *array, int nelem)
1316{
1317 int i;
1318
1319 str[0] = '\0';
1320
1321 for (i = 0; i < nelem; i++) {
b2f505be 1322 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1323 if (r >= len)
1324 return;
1325 str += r;
1326 len -= r;
1327 }
1328}
1329
1330static void intel_dp_print_rates(struct intel_dp *intel_dp)
1331{
1332 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1333 const int *source_rates, *sink_rates;
94ca719e
VS
1334 int source_len, sink_len, common_len;
1335 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1336 char str[128]; /* FIXME: too big for stack? */
1337
1338 if ((drm_debug & DRM_UT_KMS) == 0)
1339 return;
1340
1341 source_len = intel_dp_source_rates(dev, &source_rates);
1342 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1343 DRM_DEBUG_KMS("source rates: %s\n", str);
1344
1345 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1346 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1347 DRM_DEBUG_KMS("sink rates: %s\n", str);
1348
94ca719e
VS
1349 common_len = intel_dp_common_rates(intel_dp, common_rates);
1350 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1351 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1352}
1353
f4896f15 1354static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1355{
1356 int i = 0;
1357
1358 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1359 if (find == rates[i])
1360 break;
1361
1362 return i;
1363}
1364
50fec21a
VS
1365int
1366intel_dp_max_link_rate(struct intel_dp *intel_dp)
1367{
1368 int rates[DP_MAX_SUPPORTED_RATES] = {};
1369 int len;
1370
94ca719e 1371 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1372 if (WARN_ON(len <= 0))
1373 return 162000;
1374
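/* rate_to_index(0, rates) finds the first zero entry, i.e. the number of
 * populated rates, so this picks the last (highest) common rate. */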
1375 return rates[rate_to_index(0, rates) - 1];
1376}
1377
ed4e9c1d
VS
1378int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1379{
94ca719e 1380 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1381}
1382
00c09d70 1383bool
5bfe2ac0 1384intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1385 struct intel_crtc_state *pipe_config)
a4fc5ed6 1386{
5bfe2ac0 1387 struct drm_device *dev = encoder->base.dev;
36008365 1388 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1389 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1390 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1391 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1392 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1393 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1394 int lane_count, clock;
56071a20 1395 int min_lane_count = 1;
eeb6324d 1396 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1397 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1398 int min_clock = 0;
a8f3ef61 1399 int max_clock;
083f9560 1400 int bpp, mode_rate;
ff9a6750 1401 int link_avail, link_clock;
94ca719e
VS
1402 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1403 int common_len;
a8f3ef61 1404
94ca719e 1405 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1406
1407 /* No common link rates between source and sink */
94ca719e 1408 WARN_ON(common_len <= 0);
a8f3ef61 1409
94ca719e 1410 max_clock = common_len - 1;
a4fc5ed6 1411
bc7d38a4 1412 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1413 pipe_config->has_pch_encoder = true;
1414
03afc4a2 1415 pipe_config->has_dp_encoder = true;
f769cd24 1416 pipe_config->has_drrs = false;
9fcb1704 1417 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1418
dd06f90e
JN
1419 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1420 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1421 adjusted_mode);
a1b2278e
CK
1422
1423 if (INTEL_INFO(dev)->gen >= 9) {
1424 int ret;
e435d6e5 1425 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1426 if (ret)
1427 return ret;
1428 }
1429
2dd24552
JB
1430 if (!HAS_PCH_SPLIT(dev))
1431 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1432 intel_connector->panel.fitting_mode);
1433 else
b074cec8
JB
1434 intel_pch_panel_fitting(intel_crtc, pipe_config,
1435 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1436 }
1437
cb1793ce 1438 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1439 return false;
1440
083f9560 1441 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1442 "max bw %d pixel clock %iKHz\n",
94ca719e 1443 max_lane_count, common_rates[max_clock],
241bfc38 1444 adjusted_mode->crtc_clock);
083f9560 1445
36008365
DV
1446 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1447 * bpc in between. */
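/* The search below prefers the highest bpp that fits; within each bpp it
 * picks the lowest link clock and then the fewest lanes that carry the mode. */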
3e7ca985 1448 bpp = pipe_config->pipe_bpp;
56071a20 1449 if (is_edp(intel_dp)) {
22ce5628
TS
1450
1451 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1452 if (intel_connector->base.display_info.bpc == 0 &&
1453 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1454 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1455 dev_priv->vbt.edp_bpp);
1456 bpp = dev_priv->vbt.edp_bpp;
1457 }
1458
344c5bbc
JN
1459 /*
1460 * Use the maximum clock and number of lanes the eDP panel
1461 * advertises being capable of. The panels are generally
1462 * designed to support only a single clock and lane
1463 * configuration, and typically these values correspond to the
1464 * native resolution of the panel.
1465 */
1466 min_lane_count = max_lane_count;
1467 min_clock = max_clock;
7984211e 1468 }
657445fe 1469
36008365 1470 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1471 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1472 bpp);
36008365 1473
c6930992 1474 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1475 for (lane_count = min_lane_count;
1476 lane_count <= max_lane_count;
1477 lane_count <<= 1) {
1478
94ca719e 1479 link_clock = common_rates[clock];
36008365
DV
1480 link_avail = intel_dp_max_data_rate(link_clock,
1481 lane_count);
1482
1483 if (mode_rate <= link_avail) {
1484 goto found;
1485 }
1486 }
1487 }
1488 }
c4867936 1489
36008365 1490 return false;
3685a8f3 1491
36008365 1492found:
55bc60db
VS
1493 if (intel_dp->color_range_auto) {
1494 /*
1495 * See:
1496 * CEA-861-E - 5.1 Default Encoding Parameters
1497 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1498 */
18316c8c 1499 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1500 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1501 else
1502 intel_dp->color_range = 0;
1503 }
1504
3685a8f3 1505 if (intel_dp->color_range)
50f3b016 1506 pipe_config->limited_color_range = true;
a4fc5ed6 1507
36008365 1508 intel_dp->lane_count = lane_count;
a8f3ef61 1509
94ca719e 1510 if (intel_dp->num_sink_rates) {
bc27b7d3 1511 intel_dp->link_bw = 0;
a8f3ef61 1512 intel_dp->rate_select =
94ca719e 1513 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1514 } else {
1515 intel_dp->link_bw =
94ca719e 1516 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1517 intel_dp->rate_select = 0;
a8f3ef61
SJ
1518 }
1519
657445fe 1520 pipe_config->pipe_bpp = bpp;
94ca719e 1521 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1522
36008365
DV
1523 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1524 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1525 pipe_config->port_clock, bpp);
36008365
DV
1526 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1527 mode_rate, link_avail);
a4fc5ed6 1528
03afc4a2 1529 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1530 adjusted_mode->crtc_clock,
1531 pipe_config->port_clock,
03afc4a2 1532 &pipe_config->dp_m_n);
9d1a455b 1533
439d7ac0 1534 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1535 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1536 pipe_config->has_drrs = true;
439d7ac0
PB
1537 intel_link_compute_m_n(bpp, lane_count,
1538 intel_connector->panel.downclock_mode->clock,
1539 pipe_config->port_clock,
1540 &pipe_config->dp_m2_n2);
1541 }
1542
5416d871 1543 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1544 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
977bb38d
S
1545 else if (IS_BROXTON(dev))
1546 /* handled in ddi */;
5416d871 1547 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1548 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1549 else
1550 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1551
03afc4a2 1552 return true;
a4fc5ed6
KP
1553}
1554
7c62a164 1555static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1556{
7c62a164
DV
1557 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1558 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1559 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1560 struct drm_i915_private *dev_priv = dev->dev_private;
1561 u32 dpa_ctl;
1562
6e3c9717
ACO
1563 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1564 crtc->config->port_clock);
ea9b6006
DV
1565 dpa_ctl = I915_READ(DP_A);
1566 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1567
6e3c9717 1568 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1569 /* For a long time we've carried around an ILK-DevA w/a for the
1570 * 160MHz clock. If we're really unlucky, it's still required.
1571 */
1572 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1573 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1574 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1575 } else {
1576 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1577 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1578 }
1ce17038 1579
ea9b6006
DV
1580 I915_WRITE(DP_A, dpa_ctl);
1581
1582 POSTING_READ(DP_A);
1583 udelay(500);
1584}
1585
8ac33ed3 1586static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1587{
b934223d 1588 struct drm_device *dev = encoder->base.dev;
417e822d 1589 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1590 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1591 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1592 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1593 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1594
417e822d 1595 /*
1a2eb460 1596 * There are four kinds of DP registers:
417e822d
KP
1597 *
1598 * IBX PCH
1a2eb460
KP
1599 * SNB CPU
1600 * IVB CPU
417e822d
KP
1601 * CPT PCH
1602 *
1603 * IBX PCH and CPU are the same for almost everything,
1604 * except that the CPU DP PLL is configured in this
1605 * register
1606 *
1607 * CPT PCH is quite different, having many bits moved
1608 * to the TRANS_DP_CTL register instead. That
1609 * configuration happens (oddly) in ironlake_pch_enable
1610 */
9c9e7927 1611
417e822d
KP
1612 /* Preserve the BIOS-computed detected bit. This is
1613 * supposed to be read-only.
1614 */
1615 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1616
417e822d 1617 /* Handle DP bits in common between all three register formats */
417e822d 1618 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1619 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1620
6e3c9717 1621 if (crtc->config->has_audio)
ea5b213a 1622 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1623
417e822d 1624 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1625
39e5fa88 1626 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1627 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1628 intel_dp->DP |= DP_SYNC_HS_HIGH;
1629 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1630 intel_dp->DP |= DP_SYNC_VS_HIGH;
1631 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1632
6aba5b6c 1633 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1634 intel_dp->DP |= DP_ENHANCED_FRAMING;
1635
7c62a164 1636 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1637 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1638 u32 trans_dp;
1639
39e5fa88 1640 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1641
1642 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1643 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1644 trans_dp |= TRANS_DP_ENH_FRAMING;
1645 else
1646 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1647 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1648 } else {
b2634017 1649 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1650 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1651
1652 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1653 intel_dp->DP |= DP_SYNC_HS_HIGH;
1654 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1655 intel_dp->DP |= DP_SYNC_VS_HIGH;
1656 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1657
6aba5b6c 1658 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1659 intel_dp->DP |= DP_ENHANCED_FRAMING;
1660
39e5fa88 1661 if (IS_CHERRYVIEW(dev))
44f37d1f 1662 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1663 else if (crtc->pipe == PIPE_B)
1664 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1665 }
a4fc5ed6
KP
1666}
1667
ffd6749d
PZ
1668#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1669#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1670
1a5ef5b7
PZ
1671#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1672#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1673
ffd6749d
PZ
1674#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1675#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1676
4be73780 1677static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1678 u32 mask,
1679 u32 value)
bd943159 1680{
30add22d 1681 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1682 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1683 u32 pp_stat_reg, pp_ctrl_reg;
1684
e39b999a
VS
1685 lockdep_assert_held(&dev_priv->pps_mutex);
1686
bf13e81b
JN
1687 pp_stat_reg = _pp_stat_reg(intel_dp);
1688 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1689
99ea7127 1690 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1691 mask, value,
1692 I915_READ(pp_stat_reg),
1693 I915_READ(pp_ctrl_reg));
32ce697c 1694
453c5420 1695 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1696 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1697 I915_READ(pp_stat_reg),
1698 I915_READ(pp_ctrl_reg));
32ce697c 1699 }
54c136d4
CW
1700
1701 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1702}
32ce697c 1703
4be73780 1704static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1705{
1706 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1707 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1708}
1709
4be73780 1710static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1711{
1712 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1713 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1714}
1715
4be73780 1716static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1717{
1718 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1719
 1720 /* When we disable the VDD override bit last, we have to do the manual
1721 * wait. */
1722 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1723 intel_dp->panel_power_cycle_delay);
1724
4be73780 1725 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1726}
1727
4be73780 1728static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1729{
1730 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1731 intel_dp->backlight_on_delay);
1732}
1733
4be73780 1734static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1735{
1736 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1737 intel_dp->backlight_off_delay);
1738}
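/*
 * The wait_*() helpers above pair with the intel_dp->last_power_cycle,
 * last_power_on and last_backlight_off timestamps stamped elsewhere in
 * this file; wait_remaining_ms_from_jiffies(), as its name suggests,
 * only sleeps for whatever part of the configured delay has not
 * already elapsed since the recorded timestamp.
 */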
99ea7127 1739
832dd3c1
KP
1740/* Read the current pp_control value, unlocking the register if it
 1741 * is locked.
1742 */
1743
453c5420 1744static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1745{
453c5420
JB
1746 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1747 struct drm_i915_private *dev_priv = dev->dev_private;
1748 u32 control;
832dd3c1 1749
e39b999a
VS
1750 lockdep_assert_held(&dev_priv->pps_mutex);
1751
bf13e81b 1752 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1753 if (!IS_BROXTON(dev)) {
1754 control &= ~PANEL_UNLOCK_MASK;
1755 control |= PANEL_UNLOCK_REGS;
1756 }
832dd3c1 1757 return control;
bd943159
KP
1758}
1759
951468f3
VS
1760/*
1761 * Must be paired with edp_panel_vdd_off().
1762 * Must hold pps_mutex around the whole on/off sequence.
1763 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1764 */
1e0560e0 1765static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1766{
30add22d 1767 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1768 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1769 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1770 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1771 enum intel_display_power_domain power_domain;
5d613501 1772 u32 pp;
453c5420 1773 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1774 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1775
e39b999a
VS
1776 lockdep_assert_held(&dev_priv->pps_mutex);
1777
97af61f5 1778 if (!is_edp(intel_dp))
adddaaf4 1779 return false;
bd943159 1780
2c623c11 1781 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1782 intel_dp->want_panel_vdd = true;
99ea7127 1783
4be73780 1784 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1785 return need_to_disable;
b0665d57 1786
4e6e1a54
ID
1787 power_domain = intel_display_port_power_domain(intel_encoder);
1788 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1789
3936fcf4
VS
1790 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1791 port_name(intel_dig_port->port));
bd943159 1792
4be73780
DV
1793 if (!edp_have_panel_power(intel_dp))
1794 wait_panel_power_cycle(intel_dp);
99ea7127 1795
453c5420 1796 pp = ironlake_get_pp_control(intel_dp);
5d613501 1797 pp |= EDP_FORCE_VDD;
ebf33b18 1798
bf13e81b
JN
1799 pp_stat_reg = _pp_stat_reg(intel_dp);
1800 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1801
1802 I915_WRITE(pp_ctrl_reg, pp);
1803 POSTING_READ(pp_ctrl_reg);
1804 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1805 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1806 /*
1807 * If the panel wasn't on, delay before accessing aux channel
1808 */
4be73780 1809 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1810 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1811 port_name(intel_dig_port->port));
f01eca2e 1812 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1813 }
adddaaf4
JN
1814
1815 return need_to_disable;
1816}
1817
951468f3
VS
1818/*
1819 * Must be paired with intel_edp_panel_vdd_off() or
1820 * intel_edp_panel_off().
1821 * Nested calls to these functions are not allowed since
1822 * we drop the lock. Caller must use some higher level
1823 * locking to prevent nested calls from other threads.
1824 */
b80d6c78 1825void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1826{
c695b6b6 1827 bool vdd;
adddaaf4 1828
c695b6b6
VS
1829 if (!is_edp(intel_dp))
1830 return;
1831
773538e8 1832 pps_lock(intel_dp);
c695b6b6 1833 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1834 pps_unlock(intel_dp);
c695b6b6 1835
e2c719b7 1836 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1837 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1838}
1839
4be73780 1840static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1841{
30add22d 1842 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1843 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1844 struct intel_digital_port *intel_dig_port =
1845 dp_to_dig_port(intel_dp);
1846 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1847 enum intel_display_power_domain power_domain;
5d613501 1848 u32 pp;
453c5420 1849 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1850
e39b999a 1851 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1852
15e899a0 1853 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1854
15e899a0 1855 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1856 return;
b0665d57 1857
3936fcf4
VS
1858 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1859 port_name(intel_dig_port->port));
bd943159 1860
be2c9196
VS
1861 pp = ironlake_get_pp_control(intel_dp);
1862 pp &= ~EDP_FORCE_VDD;
453c5420 1863
be2c9196
VS
1864 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1865 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1866
be2c9196
VS
1867 I915_WRITE(pp_ctrl_reg, pp);
1868 POSTING_READ(pp_ctrl_reg);
90791a5c 1869
be2c9196
VS
1870 /* Make sure sequencer is idle before allowing subsequent activity */
1871 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1872 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1873
be2c9196
VS
1874 if ((pp & POWER_TARGET_ON) == 0)
1875 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1876
be2c9196
VS
1877 power_domain = intel_display_port_power_domain(intel_encoder);
1878 intel_display_power_put(dev_priv, power_domain);
bd943159 1879}
5d613501 1880
4be73780 1881static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1882{
1883 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1884 struct intel_dp, panel_vdd_work);
bd943159 1885
773538e8 1886 pps_lock(intel_dp);
15e899a0
VS
1887 if (!intel_dp->want_panel_vdd)
1888 edp_panel_vdd_off_sync(intel_dp);
773538e8 1889 pps_unlock(intel_dp);
bd943159
KP
1890}
1891
aba86890
ID
1892static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1893{
1894 unsigned long delay;
1895
1896 /*
1897 * Queue the timer to fire a long time from now (relative to the power
1898 * down delay) to keep the panel power up across a sequence of
1899 * operations.
1900 */
1901 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1902 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1903}
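/*
 * The delayed work scheduled above runs edp_panel_vdd_work(), which
 * re-checks want_panel_vdd under pps_mutex and only powers VDD back
 * down if nothing has requested it again in the meantime; a fresh
 * edp_panel_vdd_on() call also cancels the pending work.
 */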
1904
951468f3
VS
1905/*
1906 * Must be paired with edp_panel_vdd_on().
1907 * Must hold pps_mutex around the whole on/off sequence.
1908 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1909 */
4be73780 1910static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1911{
e39b999a
VS
1912 struct drm_i915_private *dev_priv =
1913 intel_dp_to_dev(intel_dp)->dev_private;
1914
1915 lockdep_assert_held(&dev_priv->pps_mutex);
1916
97af61f5
KP
1917 if (!is_edp(intel_dp))
1918 return;
5d613501 1919
e2c719b7 1920 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1921 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1922
bd943159
KP
1923 intel_dp->want_panel_vdd = false;
1924
aba86890 1925 if (sync)
4be73780 1926 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1927 else
1928 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1929}
1930
9f0fb5be 1931static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1932{
30add22d 1933 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1934 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1935 u32 pp;
453c5420 1936 u32 pp_ctrl_reg;
9934c132 1937
9f0fb5be
VS
1938 lockdep_assert_held(&dev_priv->pps_mutex);
1939
97af61f5 1940 if (!is_edp(intel_dp))
bd943159 1941 return;
99ea7127 1942
3936fcf4
VS
1943 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1944 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1945
e7a89ace
VS
1946 if (WARN(edp_have_panel_power(intel_dp),
1947 "eDP port %c panel power already on\n",
1948 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1949 return;
9934c132 1950
4be73780 1951 wait_panel_power_cycle(intel_dp);
37c6c9b0 1952
bf13e81b 1953 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1954 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1955 if (IS_GEN5(dev)) {
1956 /* ILK workaround: disable reset around power sequence */
1957 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1958 I915_WRITE(pp_ctrl_reg, pp);
1959 POSTING_READ(pp_ctrl_reg);
05ce1a49 1960 }
37c6c9b0 1961
1c0ae80a 1962 pp |= POWER_TARGET_ON;
99ea7127
KP
1963 if (!IS_GEN5(dev))
1964 pp |= PANEL_POWER_RESET;
1965
453c5420
JB
1966 I915_WRITE(pp_ctrl_reg, pp);
1967 POSTING_READ(pp_ctrl_reg);
9934c132 1968
4be73780 1969 wait_panel_on(intel_dp);
dce56b3c 1970 intel_dp->last_power_on = jiffies;
9934c132 1971
05ce1a49
KP
1972 if (IS_GEN5(dev)) {
1973 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1974 I915_WRITE(pp_ctrl_reg, pp);
1975 POSTING_READ(pp_ctrl_reg);
05ce1a49 1976 }
9f0fb5be 1977}
e39b999a 1978
9f0fb5be
VS
1979void intel_edp_panel_on(struct intel_dp *intel_dp)
1980{
1981 if (!is_edp(intel_dp))
1982 return;
1983
1984 pps_lock(intel_dp);
1985 edp_panel_on(intel_dp);
773538e8 1986 pps_unlock(intel_dp);
9934c132
JB
1987}
1988
9f0fb5be
VS
1989
1990static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1991{
4e6e1a54
ID
1992 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1993 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1994 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1995 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1996 enum intel_display_power_domain power_domain;
99ea7127 1997 u32 pp;
453c5420 1998 u32 pp_ctrl_reg;
9934c132 1999
9f0fb5be
VS
2000 lockdep_assert_held(&dev_priv->pps_mutex);
2001
97af61f5
KP
2002 if (!is_edp(intel_dp))
2003 return;
37c6c9b0 2004
3936fcf4
VS
2005 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2006 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2007
3936fcf4
VS
2008 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2009 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2010
453c5420 2011 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
 2012 /* We need to switch off panel power _and_ force vdd, as otherwise some
2013 * panels get very unhappy and cease to work. */
b3064154
PJ
2014 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2015 EDP_BLC_ENABLE);
453c5420 2016
bf13e81b 2017 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2018
849e39f5
PZ
2019 intel_dp->want_panel_vdd = false;
2020
453c5420
JB
2021 I915_WRITE(pp_ctrl_reg, pp);
2022 POSTING_READ(pp_ctrl_reg);
9934c132 2023
dce56b3c 2024 intel_dp->last_power_cycle = jiffies;
4be73780 2025 wait_panel_off(intel_dp);
849e39f5
PZ
2026
2027 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2028 power_domain = intel_display_port_power_domain(intel_encoder);
2029 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2030}
e39b999a 2031
9f0fb5be
VS
2032void intel_edp_panel_off(struct intel_dp *intel_dp)
2033{
2034 if (!is_edp(intel_dp))
2035 return;
e39b999a 2036
9f0fb5be
VS
2037 pps_lock(intel_dp);
2038 edp_panel_off(intel_dp);
773538e8 2039 pps_unlock(intel_dp);
9934c132
JB
2040}
2041
1250d107
JN
2042/* Enable backlight in the panel power control. */
2043static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2044{
da63a9f2
PZ
2045 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2046 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2047 struct drm_i915_private *dev_priv = dev->dev_private;
2048 u32 pp;
453c5420 2049 u32 pp_ctrl_reg;
32f9d658 2050
01cb9ea6
JB
2051 /*
2052 * If we enable the backlight right away following a panel power
2053 * on, we may see slight flicker as the panel syncs with the eDP
2054 * link. So delay a bit to make sure the image is solid before
2055 * allowing it to appear.
2056 */
4be73780 2057 wait_backlight_on(intel_dp);
e39b999a 2058
773538e8 2059 pps_lock(intel_dp);
e39b999a 2060
453c5420 2061 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2062 pp |= EDP_BLC_ENABLE;
453c5420 2063
bf13e81b 2064 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2065
2066 I915_WRITE(pp_ctrl_reg, pp);
2067 POSTING_READ(pp_ctrl_reg);
e39b999a 2068
773538e8 2069 pps_unlock(intel_dp);
32f9d658
ZW
2070}
2071
1250d107
JN
2072/* Enable backlight PWM and backlight PP control. */
2073void intel_edp_backlight_on(struct intel_dp *intel_dp)
2074{
2075 if (!is_edp(intel_dp))
2076 return;
2077
2078 DRM_DEBUG_KMS("\n");
2079
2080 intel_panel_enable_backlight(intel_dp->attached_connector);
2081 _intel_edp_backlight_on(intel_dp);
2082}
2083
2084/* Disable backlight in the panel power control. */
2085static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2086{
30add22d 2087 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2088 struct drm_i915_private *dev_priv = dev->dev_private;
2089 u32 pp;
453c5420 2090 u32 pp_ctrl_reg;
32f9d658 2091
f01eca2e
KP
2092 if (!is_edp(intel_dp))
2093 return;
2094
773538e8 2095 pps_lock(intel_dp);
e39b999a 2096
453c5420 2097 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2098 pp &= ~EDP_BLC_ENABLE;
453c5420 2099
bf13e81b 2100 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2101
2102 I915_WRITE(pp_ctrl_reg, pp);
2103 POSTING_READ(pp_ctrl_reg);
f7d2323c 2104
773538e8 2105 pps_unlock(intel_dp);
e39b999a
VS
2106
2107 intel_dp->last_backlight_off = jiffies;
f7d2323c 2108 edp_wait_backlight_off(intel_dp);
1250d107 2109}
f7d2323c 2110
1250d107
JN
2111/* Disable backlight PP control and backlight PWM. */
2112void intel_edp_backlight_off(struct intel_dp *intel_dp)
2113{
2114 if (!is_edp(intel_dp))
2115 return;
2116
2117 DRM_DEBUG_KMS("\n");
f7d2323c 2118
1250d107 2119 _intel_edp_backlight_off(intel_dp);
f7d2323c 2120 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2121}
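/*
 * Enable and disable are deliberate mirror images: intel_edp_backlight_on()
 * brings up the backlight PWM before setting the panel power control bit,
 * while intel_edp_backlight_off() clears the panel power control bit first
 * and disables the PWM last.
 */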
a4fc5ed6 2122
73580fb7
JN
2123/*
2124 * Hook for controlling the panel power control backlight through the bl_power
2125 * sysfs attribute. Take care to handle multiple calls.
2126 */
2127static void intel_edp_backlight_power(struct intel_connector *connector,
2128 bool enable)
2129{
2130 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2131 bool is_enabled;
2132
773538e8 2133 pps_lock(intel_dp);
e39b999a 2134 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2135 pps_unlock(intel_dp);
73580fb7
JN
2136
2137 if (is_enabled == enable)
2138 return;
2139
23ba9373
JN
2140 DRM_DEBUG_KMS("panel power control backlight %s\n",
2141 enable ? "enable" : "disable");
73580fb7
JN
2142
2143 if (enable)
2144 _intel_edp_backlight_on(intel_dp);
2145 else
2146 _intel_edp_backlight_off(intel_dp);
2147}
2148
2bd2ad64 2149static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2150{
da63a9f2
PZ
2151 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2152 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2153 struct drm_device *dev = crtc->dev;
d240f20f
JB
2154 struct drm_i915_private *dev_priv = dev->dev_private;
2155 u32 dpa_ctl;
2156
2bd2ad64
DV
2157 assert_pipe_disabled(dev_priv,
2158 to_intel_crtc(crtc)->pipe);
2159
d240f20f
JB
2160 DRM_DEBUG_KMS("\n");
2161 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2162 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2163 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2164
2165 /* We don't adjust intel_dp->DP while tearing down the link, to
2166 * facilitate link retraining (e.g. after hotplug). Hence clear all
2167 * enable bits here to ensure that we don't enable too much. */
2168 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2169 intel_dp->DP |= DP_PLL_ENABLE;
2170 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2171 POSTING_READ(DP_A);
2172 udelay(200);
d240f20f
JB
2173}
2174
2bd2ad64 2175static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2176{
da63a9f2
PZ
2177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2178 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2179 struct drm_device *dev = crtc->dev;
d240f20f
JB
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 u32 dpa_ctl;
2182
2bd2ad64
DV
2183 assert_pipe_disabled(dev_priv,
2184 to_intel_crtc(crtc)->pipe);
2185
d240f20f 2186 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2187 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2188 "dp pll off, should be on\n");
2189 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2190
2191 /* We can't rely on the value tracked for the DP register in
2192 * intel_dp->DP because link_down must not change that (otherwise link
 2193 * re-training will fail). */
298b0b39 2194 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2195 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2196 POSTING_READ(DP_A);
d240f20f
JB
2197 udelay(200);
2198}
2199
c7ad3810 2200/* If the sink supports it, try to set the power state appropriately */
c19b0669 2201void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2202{
2203 int ret, i;
2204
2205 /* Should have a valid DPCD by this point */
2206 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2207 return;
2208
2209 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2210 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2211 DP_SET_POWER_D3);
c7ad3810
JB
2212 } else {
2213 /*
2214 * When turning on, we need to retry for 1ms to give the sink
2215 * time to wake up.
2216 */
2217 for (i = 0; i < 3; i++) {
9d1a1031
JN
2218 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2219 DP_SET_POWER_D0);
c7ad3810
JB
2220 if (ret == 1)
2221 break;
2222 msleep(1);
2223 }
2224 }
f9cac721
JN
2225
2226 if (ret != 1)
2227 DRM_DEBUG_KMS("failed to %s sink power state\n",
2228 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2229}
2230
19d8fe15
DV
2231static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2232 enum pipe *pipe)
d240f20f 2233{
19d8fe15 2234 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2235 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2236 struct drm_device *dev = encoder->base.dev;
2237 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2238 enum intel_display_power_domain power_domain;
2239 u32 tmp;
2240
2241 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2242 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2243 return false;
2244
2245 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2246
2247 if (!(tmp & DP_PORT_EN))
2248 return false;
2249
39e5fa88 2250 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2251 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2252 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2253 enum pipe p;
19d8fe15 2254
adc289d7
VS
2255 for_each_pipe(dev_priv, p) {
2256 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2257 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2258 *pipe = p;
19d8fe15
DV
2259 return true;
2260 }
2261 }
19d8fe15 2262
4a0833ec
DV
2263 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2264 intel_dp->output_reg);
39e5fa88
VS
2265 } else if (IS_CHERRYVIEW(dev)) {
2266 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2267 } else {
2268 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2269 }
d240f20f 2270
19d8fe15
DV
2271 return true;
2272}
d240f20f 2273
045ac3b5 2274static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2275 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2276{
2277 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2278 u32 tmp, flags = 0;
63000ef6
XZ
2279 struct drm_device *dev = encoder->base.dev;
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 enum port port = dp_to_dig_port(intel_dp)->port;
2282 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2283 int dotclock;
045ac3b5 2284
9ed109a7 2285 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2286
2287 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2288
39e5fa88
VS
2289 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2290 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2291 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2292 flags |= DRM_MODE_FLAG_PHSYNC;
2293 else
2294 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2295
39e5fa88 2296 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2297 flags |= DRM_MODE_FLAG_PVSYNC;
2298 else
2299 flags |= DRM_MODE_FLAG_NVSYNC;
2300 } else {
39e5fa88 2301 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2302 flags |= DRM_MODE_FLAG_PHSYNC;
2303 else
2304 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2305
39e5fa88 2306 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2307 flags |= DRM_MODE_FLAG_PVSYNC;
2308 else
2309 flags |= DRM_MODE_FLAG_NVSYNC;
2310 }
045ac3b5 2311
2d112de7 2312 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2313
8c875fca
VS
2314 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2315 tmp & DP_COLOR_RANGE_16_235)
2316 pipe_config->limited_color_range = true;
2317
eb14cb74
VS
2318 pipe_config->has_dp_encoder = true;
2319
2320 intel_dp_get_m_n(crtc, pipe_config);
2321
18442d08 2322 if (port == PORT_A) {
f1f644dc
JB
2323 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2324 pipe_config->port_clock = 162000;
2325 else
2326 pipe_config->port_clock = 270000;
2327 }
18442d08
VS
2328
2329 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2330 &pipe_config->dp_m_n);
2331
2332 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2333 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2334
2d112de7 2335 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2336
c6cd2ee2
JN
2337 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2338 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2339 /*
2340 * This is a big fat ugly hack.
2341 *
2342 * Some machines in UEFI boot mode provide us a VBT that has 18
2343 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2344 * unknown we fail to light up. Yet the same BIOS boots up with
2345 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2346 * max, not what it tells us to use.
2347 *
2348 * Note: This will still be broken if the eDP panel is not lit
2349 * up by the BIOS, and thus we can't get the mode at module
2350 * load.
2351 */
2352 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2353 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2354 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2355 }
045ac3b5
JB
2356}
2357
e8cb4558 2358static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2359{
e8cb4558 2360 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2361 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2362 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2363
6e3c9717 2364 if (crtc->config->has_audio)
495a5bb8 2365 intel_audio_codec_disable(encoder);
6cb49835 2366
b32c6f48
RV
2367 if (HAS_PSR(dev) && !HAS_DDI(dev))
2368 intel_psr_disable(intel_dp);
2369
6cb49835
DV
2370 /* Make sure the panel is off before trying to change the mode. But also
2371 * ensure that we have vdd while we switch off the panel. */
24f3e092 2372 intel_edp_panel_vdd_on(intel_dp);
4be73780 2373 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2374 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2375 intel_edp_panel_off(intel_dp);
3739850b 2376
08aff3fe
VS
2377 /* disable the port before the pipe on g4x */
2378 if (INTEL_INFO(dev)->gen < 5)
3739850b 2379 intel_dp_link_down(intel_dp);
d240f20f
JB
2380}
2381
08aff3fe 2382static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2383{
2bd2ad64 2384 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2385 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2386
49277c31 2387 intel_dp_link_down(intel_dp);
08aff3fe
VS
2388 if (port == PORT_A)
2389 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2390}
2391
2392static void vlv_post_disable_dp(struct intel_encoder *encoder)
2393{
2394 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2395
2396 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2397}
2398
580d3811
VS
2399static void chv_post_disable_dp(struct intel_encoder *encoder)
2400{
2401 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2402 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2403 struct drm_device *dev = encoder->base.dev;
2404 struct drm_i915_private *dev_priv = dev->dev_private;
2405 struct intel_crtc *intel_crtc =
2406 to_intel_crtc(encoder->base.crtc);
2407 enum dpio_channel ch = vlv_dport_to_channel(dport);
2408 enum pipe pipe = intel_crtc->pipe;
2409 u32 val;
2410
2411 intel_dp_link_down(intel_dp);
2412
a580516d 2413 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2414
2415 /* Propagate soft reset to data lane reset */
97fd4d5c 2416 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2417 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2418 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2419
97fd4d5c
VS
2420 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2421 val |= CHV_PCS_REQ_SOFTRESET_EN;
2422 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2423
2424 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2425 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2426 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2427
2428 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2429 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2430 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811 2431
a580516d 2432 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2433}
2434
7b13b58a
VS
2435static void
2436_intel_dp_set_link_train(struct intel_dp *intel_dp,
2437 uint32_t *DP,
2438 uint8_t dp_train_pat)
2439{
2440 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2441 struct drm_device *dev = intel_dig_port->base.base.dev;
2442 struct drm_i915_private *dev_priv = dev->dev_private;
2443 enum port port = intel_dig_port->port;
2444
2445 if (HAS_DDI(dev)) {
2446 uint32_t temp = I915_READ(DP_TP_CTL(port));
2447
2448 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2449 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2450 else
2451 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2452
2453 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2454 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2455 case DP_TRAINING_PATTERN_DISABLE:
2456 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2457
2458 break;
2459 case DP_TRAINING_PATTERN_1:
2460 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2461 break;
2462 case DP_TRAINING_PATTERN_2:
2463 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2464 break;
2465 case DP_TRAINING_PATTERN_3:
2466 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2467 break;
2468 }
2469 I915_WRITE(DP_TP_CTL(port), temp);
2470
39e5fa88
VS
2471 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2472 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2473 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2474
2475 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2476 case DP_TRAINING_PATTERN_DISABLE:
2477 *DP |= DP_LINK_TRAIN_OFF_CPT;
2478 break;
2479 case DP_TRAINING_PATTERN_1:
2480 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2481 break;
2482 case DP_TRAINING_PATTERN_2:
2483 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2484 break;
2485 case DP_TRAINING_PATTERN_3:
2486 DRM_ERROR("DP training pattern 3 not supported\n");
2487 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2488 break;
2489 }
2490
2491 } else {
2492 if (IS_CHERRYVIEW(dev))
2493 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2494 else
2495 *DP &= ~DP_LINK_TRAIN_MASK;
2496
2497 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2498 case DP_TRAINING_PATTERN_DISABLE:
2499 *DP |= DP_LINK_TRAIN_OFF;
2500 break;
2501 case DP_TRAINING_PATTERN_1:
2502 *DP |= DP_LINK_TRAIN_PAT_1;
2503 break;
2504 case DP_TRAINING_PATTERN_2:
2505 *DP |= DP_LINK_TRAIN_PAT_2;
2506 break;
2507 case DP_TRAINING_PATTERN_3:
2508 if (IS_CHERRYVIEW(dev)) {
2509 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2510 } else {
2511 DRM_ERROR("DP training pattern 3 not supported\n");
2512 *DP |= DP_LINK_TRAIN_PAT_2;
2513 }
2514 break;
2515 }
2516 }
2517}
2518
2519static void intel_dp_enable_port(struct intel_dp *intel_dp)
2520{
2521 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2522 struct drm_i915_private *dev_priv = dev->dev_private;
2523
7b13b58a
VS
2524 /* enable with pattern 1 (as per spec) */
2525 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2526 DP_TRAINING_PATTERN_1);
2527
2528 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2529 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2530
2531 /*
2532 * Magic for VLV/CHV. We _must_ first set up the register
2533 * without actually enabling the port, and then do another
2534 * write to enable the port. Otherwise link training will
2535 * fail when the power sequencer is freshly used for this port.
2536 */
2537 intel_dp->DP |= DP_PORT_EN;
2538
2539 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2540 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2541}
2542
e8cb4558 2543static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2544{
e8cb4558
DV
2545 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2546 struct drm_device *dev = encoder->base.dev;
2547 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2548 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2549 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2550 unsigned int lane_mask = 0x0;
5d613501 2551
0c33d8d7
DV
2552 if (WARN_ON(dp_reg & DP_PORT_EN))
2553 return;
5d613501 2554
093e3f13
VS
2555 pps_lock(intel_dp);
2556
2557 if (IS_VALLEYVIEW(dev))
2558 vlv_init_panel_power_sequencer(intel_dp);
2559
7b13b58a 2560 intel_dp_enable_port(intel_dp);
093e3f13
VS
2561
2562 edp_panel_vdd_on(intel_dp);
2563 edp_panel_on(intel_dp);
2564 edp_panel_vdd_off(intel_dp, true);
2565
2566 pps_unlock(intel_dp);
2567
61234fa5 2568 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2569 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2570 lane_mask);
61234fa5 2571
f01eca2e 2572 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2573 intel_dp_start_link_train(intel_dp);
33a34e4e 2574 intel_dp_complete_link_train(intel_dp);
3ab9c637 2575 intel_dp_stop_link_train(intel_dp);
c1dec79a 2576
6e3c9717 2577 if (crtc->config->has_audio) {
c1dec79a
JN
2578 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2579 pipe_name(crtc->pipe));
2580 intel_audio_codec_enable(encoder);
2581 }
ab1f90f9 2582}
89b667f8 2583
ecff4f3b
JN
2584static void g4x_enable_dp(struct intel_encoder *encoder)
2585{
828f5c6e
JN
2586 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2587
ecff4f3b 2588 intel_enable_dp(encoder);
4be73780 2589 intel_edp_backlight_on(intel_dp);
ab1f90f9 2590}
89b667f8 2591
ab1f90f9
JN
2592static void vlv_enable_dp(struct intel_encoder *encoder)
2593{
828f5c6e
JN
2594 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2595
4be73780 2596 intel_edp_backlight_on(intel_dp);
b32c6f48 2597 intel_psr_enable(intel_dp);
d240f20f
JB
2598}
2599
ecff4f3b 2600static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2601{
2602 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2603 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2604
8ac33ed3
DV
2605 intel_dp_prepare(encoder);
2606
d41f1efb
DV
2607 /* Only ilk+ has port A */
2608 if (dport->port == PORT_A) {
2609 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2610 ironlake_edp_pll_on(intel_dp);
d41f1efb 2611 }
ab1f90f9
JN
2612}
2613
83b84597
VS
2614static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2615{
2616 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2617 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2618 enum pipe pipe = intel_dp->pps_pipe;
2619 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2620
2621 edp_panel_vdd_off_sync(intel_dp);
2622
2623 /*
 2624 * VLV seems to get confused when multiple power sequencers
 2625 * have the same port selected (even if only one has power/vdd
 2626 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2627 * CHV, on the other hand, doesn't seem to mind having the same port
 2628 * selected in multiple power sequencers, but let's always clear the
 2629 * port select when logically disconnecting a power sequencer
 2630 * from a port.
2631 */
2632 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2633 pipe_name(pipe), port_name(intel_dig_port->port));
2634 I915_WRITE(pp_on_reg, 0);
2635 POSTING_READ(pp_on_reg);
2636
2637 intel_dp->pps_pipe = INVALID_PIPE;
2638}
2639
a4a5d2f8
VS
2640static void vlv_steal_power_sequencer(struct drm_device *dev,
2641 enum pipe pipe)
2642{
2643 struct drm_i915_private *dev_priv = dev->dev_private;
2644 struct intel_encoder *encoder;
2645
2646 lockdep_assert_held(&dev_priv->pps_mutex);
2647
ac3c12e4
VS
2648 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2649 return;
2650
a4a5d2f8
VS
2651 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2652 base.head) {
2653 struct intel_dp *intel_dp;
773538e8 2654 enum port port;
a4a5d2f8
VS
2655
2656 if (encoder->type != INTEL_OUTPUT_EDP)
2657 continue;
2658
2659 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2660 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2661
2662 if (intel_dp->pps_pipe != pipe)
2663 continue;
2664
2665 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2666 pipe_name(pipe), port_name(port));
a4a5d2f8 2667
e02f9a06 2668 WARN(encoder->base.crtc,
034e43c6
VS
2669 "stealing pipe %c power sequencer from active eDP port %c\n",
2670 pipe_name(pipe), port_name(port));
a4a5d2f8 2671
a4a5d2f8 2672 /* make sure vdd is off before we steal it */
83b84597 2673 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2674 }
2675}
2676
2677static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2678{
2679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2680 struct intel_encoder *encoder = &intel_dig_port->base;
2681 struct drm_device *dev = encoder->base.dev;
2682 struct drm_i915_private *dev_priv = dev->dev_private;
2683 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2684
2685 lockdep_assert_held(&dev_priv->pps_mutex);
2686
093e3f13
VS
2687 if (!is_edp(intel_dp))
2688 return;
2689
a4a5d2f8
VS
2690 if (intel_dp->pps_pipe == crtc->pipe)
2691 return;
2692
2693 /*
2694 * If another power sequencer was being used on this
2695 * port previously make sure to turn off vdd there while
2696 * we still have control of it.
2697 */
2698 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2699 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2700
2701 /*
2702 * We may be stealing the power
2703 * sequencer from another port.
2704 */
2705 vlv_steal_power_sequencer(dev, crtc->pipe);
2706
2707 /* now it's all ours */
2708 intel_dp->pps_pipe = crtc->pipe;
2709
2710 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2711 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2712
2713 /* init power sequencer on this pipe and port */
36b5f425
VS
2714 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2715 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2716}
2717
ab1f90f9 2718static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2719{
2bd2ad64 2720 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2721 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2722 struct drm_device *dev = encoder->base.dev;
89b667f8 2723 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2724 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2725 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2726 int pipe = intel_crtc->pipe;
2727 u32 val;
a4fc5ed6 2728
a580516d 2729 mutex_lock(&dev_priv->sb_lock);
89b667f8 2730
ab3c759a 2731 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2732 val = 0;
2733 if (pipe)
2734 val |= (1<<21);
2735 else
2736 val &= ~(1<<21);
2737 val |= 0x001000c4;
ab3c759a
CML
2738 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2739 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2740 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2741
a580516d 2742 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2743
2744 intel_enable_dp(encoder);
89b667f8
JB
2745}
2746
ecff4f3b 2747static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2748{
2749 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2750 struct drm_device *dev = encoder->base.dev;
2751 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2752 struct intel_crtc *intel_crtc =
2753 to_intel_crtc(encoder->base.crtc);
e4607fcf 2754 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2755 int pipe = intel_crtc->pipe;
89b667f8 2756
8ac33ed3
DV
2757 intel_dp_prepare(encoder);
2758
89b667f8 2759 /* Program Tx lane resets to default */
a580516d 2760 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2761 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2762 DPIO_PCS_TX_LANE2_RESET |
2763 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2764 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2765 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2766 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2767 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2768 DPIO_PCS_CLK_SOFT_RESET);
2769
2770 /* Fix up inter-pair skew failure */
ab3c759a
CML
2771 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2772 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2773 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2774 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2775}
2776
e4a1d846
CML
2777static void chv_pre_enable_dp(struct intel_encoder *encoder)
2778{
2779 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2780 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2781 struct drm_device *dev = encoder->base.dev;
2782 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2783 struct intel_crtc *intel_crtc =
2784 to_intel_crtc(encoder->base.crtc);
2785 enum dpio_channel ch = vlv_dport_to_channel(dport);
2786 int pipe = intel_crtc->pipe;
2e523e98 2787 int data, i, stagger;
949c1d43 2788 u32 val;
e4a1d846 2789
a580516d 2790 mutex_lock(&dev_priv->sb_lock);
949c1d43 2791
570e2a74
VS
2792 /* allow hardware to manage TX FIFO reset source */
2793 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2794 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2795 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2796
2797 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2798 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2800
949c1d43 2801 /* Deassert soft data lane reset */
97fd4d5c 2802 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2803 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2804 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2805
2806 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2807 val |= CHV_PCS_REQ_SOFTRESET_EN;
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2809
2810 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2811 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2812 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2813
97fd4d5c 2814 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2815 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2816 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2817
 2818 /* Program Tx lane latency optimal setting */
e4a1d846 2819 for (i = 0; i < 4; i++) {
e4a1d846
CML
2820 /* Set the upar bit */
2821 data = (i == 1) ? 0x0 : 0x1;
2822 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2823 data << DPIO_UPAR_SHIFT);
2824 }
2825
2826 /* Data lane stagger programming */
2e523e98
VS
2827 if (intel_crtc->config->port_clock > 270000)
2828 stagger = 0x18;
2829 else if (intel_crtc->config->port_clock > 135000)
2830 stagger = 0xd;
2831 else if (intel_crtc->config->port_clock > 67500)
2832 stagger = 0x7;
2833 else if (intel_crtc->config->port_clock > 33750)
2834 stagger = 0x4;
2835 else
2836 stagger = 0x2;
2837
2838 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2839 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2840 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2841
2842 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2843 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2844 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2845
2846 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2847 DPIO_LANESTAGGER_STRAP(stagger) |
2848 DPIO_LANESTAGGER_STRAP_OVRD |
2849 DPIO_TX1_STAGGER_MASK(0x1f) |
2850 DPIO_TX1_STAGGER_MULT(6) |
2851 DPIO_TX2_STAGGER_MULT(0));
2852
2853 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2854 DPIO_LANESTAGGER_STRAP(stagger) |
2855 DPIO_LANESTAGGER_STRAP_OVRD |
2856 DPIO_TX1_STAGGER_MASK(0x1f) |
2857 DPIO_TX1_STAGGER_MULT(7) |
2858 DPIO_TX2_STAGGER_MULT(5));
e4a1d846 2859
a580516d 2860 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2861
e4a1d846 2862 intel_enable_dp(encoder);
e4a1d846
CML
2863}
2864
9197c88b
VS
2865static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2866{
2867 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2868 struct drm_device *dev = encoder->base.dev;
2869 struct drm_i915_private *dev_priv = dev->dev_private;
2870 struct intel_crtc *intel_crtc =
2871 to_intel_crtc(encoder->base.crtc);
2872 enum dpio_channel ch = vlv_dport_to_channel(dport);
2873 enum pipe pipe = intel_crtc->pipe;
2874 u32 val;
2875
625695f8
VS
2876 intel_dp_prepare(encoder);
2877
a580516d 2878 mutex_lock(&dev_priv->sb_lock);
9197c88b 2879
b9e5ac3c
VS
2880 /* program left/right clock distribution */
2881 if (pipe != PIPE_B) {
2882 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2883 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2884 if (ch == DPIO_CH0)
2885 val |= CHV_BUFLEFTENA1_FORCE;
2886 if (ch == DPIO_CH1)
2887 val |= CHV_BUFRIGHTENA1_FORCE;
2888 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2889 } else {
2890 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2891 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2892 if (ch == DPIO_CH0)
2893 val |= CHV_BUFLEFTENA2_FORCE;
2894 if (ch == DPIO_CH1)
2895 val |= CHV_BUFRIGHTENA2_FORCE;
2896 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2897 }
2898
9197c88b
VS
2899 /* program clock channel usage */
2900 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2901 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2902 if (pipe != PIPE_B)
2903 val &= ~CHV_PCS_USEDCLKCHANNEL;
2904 else
2905 val |= CHV_PCS_USEDCLKCHANNEL;
2906 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2907
2908 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2909 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2910 if (pipe != PIPE_B)
2911 val &= ~CHV_PCS_USEDCLKCHANNEL;
2912 else
2913 val |= CHV_PCS_USEDCLKCHANNEL;
2914 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2915
2916 /*
 2917 * This is a bit weird since generally CL
2918 * matches the pipe, but here we need to
2919 * pick the CL based on the port.
2920 */
2921 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2922 if (pipe != PIPE_B)
2923 val &= ~CHV_CMN_USEDCLKCHANNEL;
2924 else
2925 val |= CHV_CMN_USEDCLKCHANNEL;
2926 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2927
a580516d 2928 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2929}
2930
a4fc5ed6 2931/*
df0c237d
JB
2932 * Native read with retry for link status and receiver capability reads for
2933 * cases where the sink may still be asleep.
9d1a1031
JN
2934 *
2935 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2936 * supposed to retry 3 times per the spec.
a4fc5ed6 2937 */
9d1a1031
JN
2938static ssize_t
2939intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2940 void *buffer, size_t size)
a4fc5ed6 2941{
9d1a1031
JN
2942 ssize_t ret;
2943 int i;
61da5fab 2944
f6a19066
VS
2945 /*
 2946 * Sometimes we just get the same incorrect byte repeated
 2947 * over the entire buffer. Doing just one throw-away read
2948 * initially seems to "solve" it.
2949 */
2950 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2951
61da5fab 2952 for (i = 0; i < 3; i++) {
9d1a1031
JN
2953 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2954 if (ret == size)
2955 return ret;
61da5fab
JB
2956 msleep(1);
2957 }
a4fc5ed6 2958
9d1a1031 2959 return ret;
a4fc5ed6
KP
2960}
2961
2962/*
2963 * Fetch AUX CH registers 0x202 - 0x207 which contain
2964 * link status information
2965 */
2966static bool
93f62dad 2967intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2968{
9d1a1031
JN
2969 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2970 DP_LANE0_1_STATUS,
2971 link_status,
2972 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2973}
2974
1100244e 2975/* These are source-specific values. */
a4fc5ed6 2976static uint8_t
1a2eb460 2977intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2978{
30add22d 2979 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2980 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2981 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2982
9314726b
VK
2983 if (IS_BROXTON(dev))
2984 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2985 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2986 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2987 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2988 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2989 } else if (IS_VALLEYVIEW(dev))
bd60018a 2990 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2991 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2992 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2993 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2994 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2995 else
bd60018a 2996 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2997}
2998
2999static uint8_t
3000intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3001{
30add22d 3002 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3003 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3004
5a9d1f1a
DL
3005 if (INTEL_INFO(dev)->gen >= 9) {
3006 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3008 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3009 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3010 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3013 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3014 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3015 default:
3016 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3017 }
3018 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3019 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3020 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3021 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3023 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3025 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3027 default:
bd60018a 3028 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3029 }
e2fa6fba
P
3030 } else if (IS_VALLEYVIEW(dev)) {
3031 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3033 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3034 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3035 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3037 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3039 default:
bd60018a 3040 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3041 }
bc7d38a4 3042 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3043 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3045 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3047 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3048 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3049 default:
bd60018a 3050 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3051 }
3052 } else {
3053 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3054 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3055 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3057 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3059 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3061 default:
bd60018a 3062 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3063 }
a4fc5ed6
KP
3064 }
3065}
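/*
 * Across all of the platform variants above, the pattern is the same:
 * the maximum pre-emphasis the source allows never increases as the
 * requested voltage swing level goes up.
 */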
3066
5829975c 3067static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3068{
3069 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3070 struct drm_i915_private *dev_priv = dev->dev_private;
3071 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3072 struct intel_crtc *intel_crtc =
3073 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3074 unsigned long demph_reg_value, preemph_reg_value,
3075 uniqtranscale_reg_value;
3076 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3077 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3078 int pipe = intel_crtc->pipe;
e2fa6fba
P
3079
3080 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3081 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3082 preemph_reg_value = 0x0004000;
3083 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3084 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3085 demph_reg_value = 0x2B405555;
3086 uniqtranscale_reg_value = 0x552AB83A;
3087 break;
bd60018a 3088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3089 demph_reg_value = 0x2B404040;
3090 uniqtranscale_reg_value = 0x5548B83A;
3091 break;
bd60018a 3092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3093 demph_reg_value = 0x2B245555;
3094 uniqtranscale_reg_value = 0x5560B83A;
3095 break;
bd60018a 3096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3097 demph_reg_value = 0x2B405555;
3098 uniqtranscale_reg_value = 0x5598DA3A;
3099 break;
3100 default:
3101 return 0;
3102 }
3103 break;
bd60018a 3104 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3105 preemph_reg_value = 0x0002000;
3106 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3108 demph_reg_value = 0x2B404040;
3109 uniqtranscale_reg_value = 0x5552B83A;
3110 break;
bd60018a 3111 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3112 demph_reg_value = 0x2B404848;
3113 uniqtranscale_reg_value = 0x5580B83A;
3114 break;
bd60018a 3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3116 demph_reg_value = 0x2B404040;
3117 uniqtranscale_reg_value = 0x55ADDA3A;
3118 break;
3119 default:
3120 return 0;
3121 }
3122 break;
bd60018a 3123 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3124 preemph_reg_value = 0x0000000;
3125 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3127 demph_reg_value = 0x2B305555;
3128 uniqtranscale_reg_value = 0x5570B83A;
3129 break;
bd60018a 3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3131 demph_reg_value = 0x2B2B4040;
3132 uniqtranscale_reg_value = 0x55ADDA3A;
3133 break;
3134 default:
3135 return 0;
3136 }
3137 break;
bd60018a 3138 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3139 preemph_reg_value = 0x0006000;
3140 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3142 demph_reg_value = 0x1B405555;
3143 uniqtranscale_reg_value = 0x55ADDA3A;
3144 break;
3145 default:
3146 return 0;
3147 }
3148 break;
3149 default:
3150 return 0;
3151 }
3152
a580516d 3153 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3154 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3155 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3156 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3157 uniqtranscale_reg_value);
ab3c759a
CML
3158 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3160 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3161 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3162 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3163
3164 return 0;
3165}
3166
5829975c 3167static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3168{
3169 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3170 struct drm_i915_private *dev_priv = dev->dev_private;
3171 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3172 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3173 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3174 uint8_t train_set = intel_dp->train_set[0];
3175 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3176 enum pipe pipe = intel_crtc->pipe;
3177 int i;
e4a1d846
CML
3178
3179 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3180 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3181 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3182 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3183 deemph_reg_value = 128;
3184 margin_reg_value = 52;
3185 break;
bd60018a 3186 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3187 deemph_reg_value = 128;
3188 margin_reg_value = 77;
3189 break;
bd60018a 3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3191 deemph_reg_value = 128;
3192 margin_reg_value = 102;
3193 break;
bd60018a 3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3195 deemph_reg_value = 128;
3196 margin_reg_value = 154;
3197 /* FIXME extra to set for 1200 */
3198 break;
3199 default:
3200 return 0;
3201 }
3202 break;
bd60018a 3203 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3204 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3205 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3206 deemph_reg_value = 85;
3207 margin_reg_value = 78;
3208 break;
bd60018a 3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3210 deemph_reg_value = 85;
3211 margin_reg_value = 116;
3212 break;
bd60018a 3213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3214 deemph_reg_value = 85;
3215 margin_reg_value = 154;
3216 break;
3217 default:
3218 return 0;
3219 }
3220 break;
bd60018a 3221 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3222 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3223 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3224 deemph_reg_value = 64;
3225 margin_reg_value = 104;
3226 break;
bd60018a 3227 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3228 deemph_reg_value = 64;
3229 margin_reg_value = 154;
3230 break;
3231 default:
3232 return 0;
3233 }
3234 break;
bd60018a 3235 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3236 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3238 deemph_reg_value = 43;
3239 margin_reg_value = 154;
3240 break;
3241 default:
3242 return 0;
3243 }
3244 break;
3245 default:
3246 return 0;
3247 }
3248
a580516d 3249 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3250
3251 /* Clear calc init */
1966e59e
VS
3252 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3253 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3254 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3255 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3256 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3257
3258 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3259 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3260 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3261 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3262 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3263
a02ef3c7
VS
3264 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3265 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3266 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3267 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3268
3269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3270 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3271 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3272 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3273
e4a1d846 3274 /* Program swing deemph */
f72df8db
VS
3275 for (i = 0; i < 4; i++) {
3276 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3277 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3278 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3279 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3280 }
e4a1d846
CML
3281
3282 /* Program swing margin */
f72df8db
VS
3283 for (i = 0; i < 4; i++) {
3284 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3285 val &= ~DPIO_SWING_MARGIN000_MASK;
3286 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3287 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3288 }
e4a1d846
CML
3289
3290 /* Disable unique transition scale */
f72df8db
VS
3291 for (i = 0; i < 4; i++) {
3292 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3293 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3294 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3295 }
e4a1d846
CML
3296
3297 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3298 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3299 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3300 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3301
3302 /*
3303 * The document says to set bit 27 for ch0 and bit 26 for ch1,
3304 * which might be a typo in the doc.
3305 * For now, for this unique transition scale selection, set bit
3306 * 27 for both ch0 and ch1.
3307 */
f72df8db
VS
3308 for (i = 0; i < 4; i++) {
3309 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3310 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3311 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3312 }
e4a1d846 3313
f72df8db
VS
3314 for (i = 0; i < 4; i++) {
3315 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3316 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3317 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3318 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3319 }
e4a1d846
CML
3320 }
3321
3322 /* Start swing calculation */
1966e59e
VS
3323 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3324 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3325 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3326
3327 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3328 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3329 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3330
3331 /* LRC Bypass */
3332 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3333 val |= DPIO_LRC_BYPASS;
3334 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3335
a580516d 3336 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3337
3338 return 0;
3339}
3340
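/*
 * Work out the next drive settings from the sink's adjust request: take the
 * highest voltage swing and pre-emphasis requested across the active lanes,
 * clamp them to what the source supports, and set MAX_SWING_REACHED /
 * MAX_PRE_EMPHASIS_REACHED once the clamp kicks in. The same value is then
 * applied to every lane.
 */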
a4fc5ed6 3341static void
0301b3ac
JN
3342intel_get_adjust_train(struct intel_dp *intel_dp,
3343 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3344{
3345 uint8_t v = 0;
3346 uint8_t p = 0;
3347 int lane;
1a2eb460
KP
3348 uint8_t voltage_max;
3349 uint8_t preemph_max;
a4fc5ed6 3350
33a34e4e 3351 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3352 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3353 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3354
3355 if (this_v > v)
3356 v = this_v;
3357 if (this_p > p)
3358 p = this_p;
3359 }
3360
1a2eb460 3361 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3362 if (v >= voltage_max)
3363 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3364
1a2eb460
KP
3365 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3366 if (p >= preemph_max)
3367 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3368
3369 for (lane = 0; lane < 4; lane++)
33a34e4e 3370 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3371}
3372
3373static uint32_t
5829975c 3374gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3375{
3cf2efb1 3376 uint32_t signal_levels = 0;
a4fc5ed6 3377
3cf2efb1 3378 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3380 default:
3381 signal_levels |= DP_VOLTAGE_0_4;
3382 break;
bd60018a 3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3384 signal_levels |= DP_VOLTAGE_0_6;
3385 break;
bd60018a 3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3387 signal_levels |= DP_VOLTAGE_0_8;
3388 break;
bd60018a 3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3390 signal_levels |= DP_VOLTAGE_1_2;
3391 break;
3392 }
3cf2efb1 3393 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3394 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3395 default:
3396 signal_levels |= DP_PRE_EMPHASIS_0;
3397 break;
bd60018a 3398 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3399 signal_levels |= DP_PRE_EMPHASIS_3_5;
3400 break;
bd60018a 3401 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3402 signal_levels |= DP_PRE_EMPHASIS_6;
3403 break;
bd60018a 3404 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3405 signal_levels |= DP_PRE_EMPHASIS_9_5;
3406 break;
3407 }
3408 return signal_levels;
3409}
3410
e3421a18
ZW
3411/* Gen6's DP voltage swing and pre-emphasis control */
3412static uint32_t
5829975c 3413gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3414{
3c5a62b5
YL
3415 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3416 DP_TRAIN_PRE_EMPHASIS_MASK);
3417 switch (signal_levels) {
bd60018a
SJ
3418 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3419 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3420 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3422 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3423 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3425 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3428 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3431 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3432 default:
3c5a62b5
YL
3433 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3434 "0x%x\n", signal_levels);
3435 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3436 }
3437}
3438
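/*
 * Example of the mapping above: a train_set requesting voltage swing level 1
 * with pre-emphasis level 1 selects EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
 * any combination not listed falls back to the 400-600 mV / 0 dB setting
 * with a debug message.
 */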
1a2eb460
KP
3439/* Gen7's DP voltage swing and pre-emphasis control */
3440static uint32_t
5829975c 3441gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3442{
3443 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3444 DP_TRAIN_PRE_EMPHASIS_MASK);
3445 switch (signal_levels) {
bd60018a 3446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3447 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3449 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3451 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3452
bd60018a 3453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3454 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3456 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3457
bd60018a 3458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3459 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3461 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3462
3463 default:
3464 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3465 "0x%x\n", signal_levels);
3466 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3467 }
3468}
3469
f0a3424e
PZ
3470/* Properly updates "DP" with the correct signal levels. */
3471static void
3472intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3473{
3474 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3475 enum port port = intel_dig_port->port;
f0a3424e 3476 struct drm_device *dev = intel_dig_port->base.base.dev;
f8896f5d 3477 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3478 uint8_t train_set = intel_dp->train_set[0];
3479
f8896f5d
DW
3480 if (HAS_DDI(dev)) {
3481 signal_levels = ddi_signal_levels(intel_dp);
3482
3483 if (IS_BROXTON(dev))
3484 signal_levels = 0;
3485 else
3486 mask = DDI_BUF_EMP_MASK;
e4a1d846 3487 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3488 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3489 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3490 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3491 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3492 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3493 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3494 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3495 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3496 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3497 } else {
5829975c 3498 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3499 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3500 }
3501
96fb9f9b
VK
3502 if (mask)
3503 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3504
3505 DRM_DEBUG_KMS("Using vswing level %d\n",
3506 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3507 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3508 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3509 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3510
3511 *DP = (*DP & ~mask) | signal_levels;
3512}
3513
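/*
 * intel_dp_set_link_train() below updates both ends of the link: the source
 * side via _intel_dp_set_link_train() and the port register write, and the
 * sink side via a single AUX write covering DP_TRAINING_PATTERN_SET plus the
 * per-lane DP_TRAINING_LANEx_SET bytes. When the pattern is
 * DP_TRAINING_PATTERN_DISABLE only the pattern byte is written.
 */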
a4fc5ed6 3514static bool
ea5b213a 3515intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3516 uint32_t *DP,
58e10eb9 3517 uint8_t dp_train_pat)
a4fc5ed6 3518{
174edf1f
PZ
3519 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3520 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3521 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3522 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3523 int ret, len;
a4fc5ed6 3524
7b13b58a 3525 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3526
70aff66c 3527 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3528 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3529
2cdfe6c8
JN
3530 buf[0] = dp_train_pat;
3531 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3532 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3533 /* don't write DP_TRAINING_LANEx_SET on disable */
3534 len = 1;
3535 } else {
3536 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3537 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3538 len = intel_dp->lane_count + 1;
47ea7542 3539 }
a4fc5ed6 3540
9d1a1031
JN
3541 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3542 buf, len);
2cdfe6c8
JN
3543
3544 return ret == len;
a4fc5ed6
KP
3545}
3546
70aff66c
JN
3547static bool
3548intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3549 uint8_t dp_train_pat)
3550{
4e96c977
MK
3551 if (!intel_dp->train_set_valid)
3552 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3553 intel_dp_set_signal_levels(intel_dp, DP);
3554 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3555}
3556
3557static bool
3558intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3559 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3560{
3561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3562 struct drm_device *dev = intel_dig_port->base.base.dev;
3563 struct drm_i915_private *dev_priv = dev->dev_private;
3564 int ret;
3565
3566 intel_get_adjust_train(intel_dp, link_status);
3567 intel_dp_set_signal_levels(intel_dp, DP);
3568
3569 I915_WRITE(intel_dp->output_reg, *DP);
3570 POSTING_READ(intel_dp->output_reg);
3571
9d1a1031
JN
3572 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3573 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3574
3575 return ret == intel_dp->lane_count;
3576}
3577
3ab9c637
ID
3578static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3579{
3580 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3581 struct drm_device *dev = intel_dig_port->base.base.dev;
3582 struct drm_i915_private *dev_priv = dev->dev_private;
3583 enum port port = intel_dig_port->port;
3584 uint32_t val;
3585
3586 if (!HAS_DDI(dev))
3587 return;
3588
3589 val = I915_READ(DP_TP_CTL(port));
3590 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3591 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3592 I915_WRITE(DP_TP_CTL(port), val);
3593
3594 /*
3595 * On PORT_A we can have only eDP in SST mode. There the only reason
3596 * we need to set idle transmission mode is to work around a HW issue
3597 * where we enable the pipe while not in idle link-training mode.
3598 * In this case there is requirement to wait for a minimum number of
3599 * idle patterns to be sent.
3600 */
3601 if (port == PORT_A)
3602 return;
3603
3604 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3605 1))
3606 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3607}
3608
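/*
 * Clock recovery phase. The link configuration (link rate, lane count,
 * enhanced framing, optional DP_LINK_RATE_SET, downspread/8b10b coding) is
 * written first, then training pattern 1 is enabled with scrambling
 * disabled. The loop below re-reads the link status, applies the drive
 * settings the sink asks for, and gives up after five full retries at
 * maximum swing or five attempts at the same voltage.
 */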
33a34e4e 3609/* Enable corresponding port and start training pattern 1 */
c19b0669 3610void
33a34e4e 3611intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3612{
da63a9f2 3613 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3614 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3615 int i;
3616 uint8_t voltage;
cdb0e95b 3617 int voltage_tries, loop_tries;
ea5b213a 3618 uint32_t DP = intel_dp->DP;
6aba5b6c 3619 uint8_t link_config[2];
a4fc5ed6 3620
affa9354 3621 if (HAS_DDI(dev))
c19b0669
PZ
3622 intel_ddi_prepare_link_retrain(encoder);
3623
3cf2efb1 3624 /* Write the link configuration data */
6aba5b6c
JN
3625 link_config[0] = intel_dp->link_bw;
3626 link_config[1] = intel_dp->lane_count;
3627 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3628 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3629 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3630 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3631 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3632 &intel_dp->rate_select, 1);
6aba5b6c
JN
3633
3634 link_config[0] = 0;
3635 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3636 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3637
3638 DP |= DP_PORT_EN;
1a2eb460 3639
70aff66c
JN
3640 /* clock recovery */
3641 if (!intel_dp_reset_link_train(intel_dp, &DP,
3642 DP_TRAINING_PATTERN_1 |
3643 DP_LINK_SCRAMBLING_DISABLE)) {
3644 DRM_ERROR("failed to enable link training\n");
3645 return;
3646 }
3647
a4fc5ed6 3648 voltage = 0xff;
cdb0e95b
KP
3649 voltage_tries = 0;
3650 loop_tries = 0;
a4fc5ed6 3651 for (;;) {
70aff66c 3652 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3653
a7c9655f 3654 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3655 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3656 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3657 break;
93f62dad 3658 }
a4fc5ed6 3659
01916270 3660 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3661 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3662 break;
3663 }
3664
4e96c977
MK
3665 /*
3666 * if we used previously trained voltage and pre-emphasis values
3667 * and we don't get clock recovery, reset link training values
3668 */
3669 if (intel_dp->train_set_valid) {
3670 DRM_DEBUG_KMS("clock recovery not ok, reset");
3671 /* clear the flag as we are not reusing train set */
3672 intel_dp->train_set_valid = false;
3673 if (!intel_dp_reset_link_train(intel_dp, &DP,
3674 DP_TRAINING_PATTERN_1 |
3675 DP_LINK_SCRAMBLING_DISABLE)) {
3676 DRM_ERROR("failed to enable link training\n");
3677 return;
3678 }
3679 continue;
3680 }
3681
3cf2efb1
CW
3682 /* Check to see if we've tried the max voltage */
3683 for (i = 0; i < intel_dp->lane_count; i++)
3684 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3685 break;
3b4f819d 3686 if (i == intel_dp->lane_count) {
b06fbda3
DV
3687 ++loop_tries;
3688 if (loop_tries == 5) {
3def84b3 3689 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3690 break;
3691 }
70aff66c
JN
3692 intel_dp_reset_link_train(intel_dp, &DP,
3693 DP_TRAINING_PATTERN_1 |
3694 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3695 voltage_tries = 0;
3696 continue;
3697 }
a4fc5ed6 3698
3cf2efb1 3699 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3700 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3701 ++voltage_tries;
b06fbda3 3702 if (voltage_tries == 5) {
3def84b3 3703 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3704 break;
3705 }
3706 } else
3707 voltage_tries = 0;
3708 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3709
70aff66c
JN
3710 /* Update training set as requested by target */
3711 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3712 DRM_ERROR("failed to update link training\n");
3713 break;
3714 }
a4fc5ed6
KP
3715 }
3716
33a34e4e
JB
3717 intel_dp->DP = DP;
3718}
3719
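/*
 * Channel equalization phase. Training pattern 2 is used by default and
 * pattern 3 for HBR2 or TPS3-capable sinks. The loop keeps adjusting the
 * drive settings while checking that clock recovery still holds; if it is
 * lost, or equalization does not succeed within five tries, the full clock
 * recovery sequence is restarted, with at most five such restarts.
 */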
c19b0669 3720void
33a34e4e
JB
3721intel_dp_complete_link_train(struct intel_dp *intel_dp)
3722{
33a34e4e 3723 bool channel_eq = false;
37f80975 3724 int tries, cr_tries;
33a34e4e 3725 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3726 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3727
3728 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3729 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3730 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3731
a4fc5ed6 3732 /* channel equalization */
70aff66c 3733 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3734 training_pattern |
70aff66c
JN
3735 DP_LINK_SCRAMBLING_DISABLE)) {
3736 DRM_ERROR("failed to start channel equalization\n");
3737 return;
3738 }
3739
a4fc5ed6 3740 tries = 0;
37f80975 3741 cr_tries = 0;
a4fc5ed6
KP
3742 channel_eq = false;
3743 for (;;) {
70aff66c 3744 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3745
37f80975
JB
3746 if (cr_tries > 5) {
3747 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3748 break;
3749 }
3750
a7c9655f 3751 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3752 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3753 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3754 break;
70aff66c 3755 }
a4fc5ed6 3756
37f80975 3757 /* Make sure clock is still ok */
01916270 3758 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3759 intel_dp->train_set_valid = false;
37f80975 3760 intel_dp_start_link_train(intel_dp);
70aff66c 3761 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3762 training_pattern |
70aff66c 3763 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3764 cr_tries++;
3765 continue;
3766 }
3767
1ffdff13 3768 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3769 channel_eq = true;
3770 break;
3771 }
a4fc5ed6 3772
37f80975
JB
3773 /* Try 5 times, then try clock recovery if that fails */
3774 if (tries > 5) {
4e96c977 3775 intel_dp->train_set_valid = false;
37f80975 3776 intel_dp_start_link_train(intel_dp);
70aff66c 3777 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3778 training_pattern |
70aff66c 3779 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3780 tries = 0;
3781 cr_tries++;
3782 continue;
3783 }
a4fc5ed6 3784
70aff66c
JN
3785 /* Update training set as requested by target */
3786 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3787 DRM_ERROR("failed to update link training\n");
3788 break;
3789 }
3cf2efb1 3790 ++tries;
869184a6 3791 }
3cf2efb1 3792
3ab9c637
ID
3793 intel_dp_set_idle_link_train(intel_dp);
3794
3795 intel_dp->DP = DP;
3796
4e96c977 3797 if (channel_eq) {
5fa836a9 3798 intel_dp->train_set_valid = true;
07f42258 3799 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3800 }
3ab9c637
ID
3801}
3802
3803void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3804{
70aff66c 3805 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3806 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3807}
3808
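/*
 * Link teardown: the port is first put back into the idle training pattern,
 * then DP_PORT_EN and audio output are cleared. On IBX with the port on
 * pipe B the port is briefly re-enabled with training pattern 1 on
 * transcoder A as a hardware workaround before being disabled again, and
 * finally the panel power-down delay is honoured.
 */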
3809static void
ea5b213a 3810intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3811{
da63a9f2 3812 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3813 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3814 enum port port = intel_dig_port->port;
da63a9f2 3815 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3816 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3817 uint32_t DP = intel_dp->DP;
a4fc5ed6 3818
bc76e320 3819 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3820 return;
3821
0c33d8d7 3822 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3823 return;
3824
28c97730 3825 DRM_DEBUG_KMS("\n");
32f9d658 3826
39e5fa88
VS
3827 if ((IS_GEN7(dev) && port == PORT_A) ||
3828 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3829 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3830 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3831 } else {
aad3d14d
VS
3832 if (IS_CHERRYVIEW(dev))
3833 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3834 else
3835 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3836 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3837 }
1612c8bd 3838 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3839 POSTING_READ(intel_dp->output_reg);
5eb08b69 3840
1612c8bd
VS
3841 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3842 I915_WRITE(intel_dp->output_reg, DP);
3843 POSTING_READ(intel_dp->output_reg);
3844
3845 /*
3846 * HW workaround for IBX, we need to move the port
3847 * to transcoder A after disabling it to allow the
3848 * matching HDMI port to be enabled on transcoder A.
3849 */
3850 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3851 /* always enable with pattern 1 (as per spec) */
3852 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3853 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3854 I915_WRITE(intel_dp->output_reg, DP);
3855 POSTING_READ(intel_dp->output_reg);
3856
3857 DP &= ~DP_PORT_EN;
5bddd17f 3858 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3859 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3860 }
3861
f01eca2e 3862 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3863}
3864
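/*
 * DPCD probe: read the receiver capability block, bail out if the revision
 * reads back as zero, then gather the optional bits used later: PSR/PSR2
 * support on eDP, whether training pattern 3 may be used, and on eDP 1.4+
 * the table of supported link rates (converted to deca-kHz). Finally the
 * downstream port info is fetched for branch devices.
 */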
26d61aad
KP
3865static bool
3866intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3867{
a031d709
RV
3868 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3869 struct drm_device *dev = dig_port->base.base.dev;
3870 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3871 uint8_t rev;
a031d709 3872
9d1a1031
JN
3873 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3874 sizeof(intel_dp->dpcd)) < 0)
edb39244 3875 return false; /* aux transfer failed */
92fd8fd1 3876
a8e98153 3877 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3878
edb39244
AJ
3879 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3880 return false; /* DPCD not present */
3881
2293bb5c
SK
3882 /* Check if the panel supports PSR */
3883 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3884 if (is_edp(intel_dp)) {
9d1a1031
JN
3885 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3886 intel_dp->psr_dpcd,
3887 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3888 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3889 dev_priv->psr.sink_support = true;
50003939 3890 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3891 }
474d1ec4
SJ
3892
3893 if (INTEL_INFO(dev)->gen >= 9 &&
3894 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3895 uint8_t frame_sync_cap;
3896
3897 dev_priv->psr.sink_support = true;
3898 intel_dp_dpcd_read_wake(&intel_dp->aux,
3899 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3900 &frame_sync_cap, 1);
3901 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3902 /* PSR2 needs frame sync as well */
3903 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3904 DRM_DEBUG_KMS("PSR2 %s on sink",
3905 dev_priv->psr.psr2_support ? "supported" : "not supported");
3906 }
50003939
JN
3907 }
3908
ed63baaf
TS
3909 /* Training Pattern 3 support. Only Intel platforms that support HBR2
3910 * also support TP3, so that source check is used along with the DPCD
3911 * check to ensure TP3 can be enabled.
3912 * SKL before B0 is the only exception: because of WaDisableHBR2, TP3 is
3913 * supported there but still not enabled.
3914 */
06ea66b6 3915 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611 3916 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
ed63baaf 3917 intel_dp_source_supports_hbr2(dev)) {
06ea66b6 3918 intel_dp->use_tps3 = true;
f8d8a672 3919 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3920 } else
3921 intel_dp->use_tps3 = false;
3922
fc0f8e25
SJ
3923 /* Intermediate frequency support */
3924 if (is_edp(intel_dp) &&
3925 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3926 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3927 (rev >= 0x03)) { /* eDP 1.4 or higher */
94ca719e 3928 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3929 int i;
3930
fc0f8e25
SJ
3931 intel_dp_dpcd_read_wake(&intel_dp->aux,
3932 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3933 sink_rates,
3934 sizeof(sink_rates));
ea2d8a42 3935
94ca719e
VS
3936 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3937 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3938
3939 if (val == 0)
3940 break;
3941
af77b974
SJ
3942 /* Value read is in units of 200 kHz; drm clocks are stored in deca-kHz */
3943 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3944 }
94ca719e 3945 intel_dp->num_sink_rates = i;
fc0f8e25 3946 }
0336400e
VS
3947
3948 intel_dp_print_rates(intel_dp);
3949
edb39244
AJ
3950 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3951 DP_DWN_STRM_PORT_PRESENT))
3952 return true; /* native DP sink */
3953
3954 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3955 return true; /* no per-port downstream info */
3956
9d1a1031
JN
3957 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3958 intel_dp->downstream_ports,
3959 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3960 return false; /* downstream port status fetch failed */
3961
3962 return true;
92fd8fd1
KP
3963}
3964
0d198328
AJ
3965static void
3966intel_dp_probe_oui(struct intel_dp *intel_dp)
3967{
3968 u8 buf[3];
3969
3970 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3971 return;
3972
9d1a1031 3973 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3974 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3975 buf[0], buf[1], buf[2]);
3976
9d1a1031 3977 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3978 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3979 buf[0], buf[1], buf[2]);
3980}
3981
0e32b39c
DA
3982static bool
3983intel_dp_probe_mst(struct intel_dp *intel_dp)
3984{
3985 u8 buf[1];
3986
3987 if (!intel_dp->can_mst)
3988 return false;
3989
3990 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3991 return false;
3992
0e32b39c
DA
3993 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3994 if (buf[0] & DP_MST_CAP) {
3995 DRM_DEBUG_KMS("Sink is MST capable\n");
3996 intel_dp->is_mst = true;
3997 } else {
3998 DRM_DEBUG_KMS("Sink is not MST capable\n");
3999 intel_dp->is_mst = false;
4000 }
4001 }
0e32b39c
DA
4002
4003 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4004 return intel_dp->is_mst;
4005}
4006
082dcc7c 4007static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4008{
082dcc7c
RV
4009 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4010 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4011 u8 buf;
d2e216d0 4012
082dcc7c
RV
4013 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4014 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4015 return;
4373f0f2
PZ
4016 }
4017
082dcc7c
RV
4018 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4019 buf & ~DP_TEST_SINK_START) < 0)
4020 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
d2e216d0 4021
082dcc7c
RV
4022 hsw_enable_ips(intel_crtc);
4023}
4024
4025static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4026{
4027 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4028 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4029 u8 buf;
4030
4031 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4032 return -EIO;
4033
4034 if (!(buf & DP_TEST_CRC_SUPPORTED))
4035 return -ENOTTY;
4036
4037 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4038 return -EIO;
4039
4040 hsw_disable_ips(intel_crtc);
1dda5f93 4041
9d1a1031 4042 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4043 buf | DP_TEST_SINK_START) < 0) {
4044 hsw_enable_ips(intel_crtc);
4045 return -EIO;
4373f0f2
PZ
4046 }
4047
082dcc7c
RV
4048 return 0;
4049}
4050
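/*
 * Sink CRC readout: start the sink's CRC generator (TEST_SINK_START, with
 * IPS disabled so the source output is stable), wait up to six vblanks for
 * the TEST_SINK_MISC count to change, read the six CRC bytes starting at
 * DP_TEST_CRC_R_CR, then stop the sink CRC generator and re-enable IPS.
 */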
4051int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4052{
4053 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4054 struct drm_device *dev = dig_port->base.base.dev;
4055 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4056 u8 buf;
4057 int test_crc_count;
4058 int attempts = 6;
4059 int ret;
4060
4061 ret = intel_dp_sink_crc_start(intel_dp);
4062 if (ret)
4063 return ret;
4064
4373f0f2
PZ
4065 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4066 ret = -EIO;
afe0d67e 4067 goto stop;
4373f0f2 4068 }
d2e216d0 4069
ad9dc91b 4070 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4071
ad9dc91b 4072 do {
1dda5f93 4073 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4074 DP_TEST_SINK_MISC, &buf) < 0) {
4075 ret = -EIO;
afe0d67e 4076 goto stop;
4373f0f2 4077 }
ad9dc91b
RV
4078 intel_wait_for_vblank(dev, intel_crtc->pipe);
4079 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4080
4081 if (attempts == 0) {
90bd1f46 4082 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4373f0f2 4083 ret = -ETIMEDOUT;
afe0d67e 4084 goto stop;
ad9dc91b 4085 }
d2e216d0 4086
082dcc7c 4087 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4373f0f2 4088 ret = -EIO;
afe0d67e 4089stop:
082dcc7c 4090 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4091 return ret;
d2e216d0
RV
4092}
4093
a60f0e38
JB
4094static bool
4095intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4096{
9d1a1031
JN
4097 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4098 DP_DEVICE_SERVICE_IRQ_VECTOR,
4099 sink_irq_vector, 1) == 1;
a60f0e38
JB
4100}
4101
0e32b39c
DA
4102static bool
4103intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4104{
4105 int ret;
4106
4107 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4108 DP_SINK_COUNT_ESI,
4109 sink_irq_vector, 14);
4110 if (ret != 14)
4111 return false;
4112
4113 return true;
4114}
4115
c5d5ab7a
TP
4116static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4117{
4118 uint8_t test_result = DP_TEST_ACK;
4119 return test_result;
4120}
4121
4122static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4123{
4124 uint8_t test_result = DP_TEST_NAK;
4125 return test_result;
4126}
4127
4128static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4129{
c5d5ab7a 4130 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4131 struct intel_connector *intel_connector = intel_dp->attached_connector;
4132 struct drm_connector *connector = &intel_connector->base;
4133
4134 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4135 connector->edid_corrupt ||
559be30c
TP
4136 intel_dp->aux.i2c_defer_count > 6) {
4137 /* Check EDID read for NACKs, DEFERs and corruption
4138 * (DP CTS 1.2 Core r1.1)
4139 * 4.2.2.4 : Failed EDID read, I2C_NAK
4140 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4141 * 4.2.2.6 : EDID corruption detected
4142 * Use failsafe mode for all cases
4143 */
4144 if (intel_dp->aux.i2c_nack_count > 0 ||
4145 intel_dp->aux.i2c_defer_count > 0)
4146 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4147 intel_dp->aux.i2c_nack_count,
4148 intel_dp->aux.i2c_defer_count);
4149 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4150 } else {
f79b468e
TS
4151 struct edid *block = intel_connector->detect_edid;
4152
4153 /* We have to write the checksum
4154 * of the last block read
4155 */
4156 block += intel_connector->detect_edid->extensions;
4157
559be30c
TP
4158 if (!drm_dp_dpcd_write(&intel_dp->aux,
4159 DP_TEST_EDID_CHECKSUM,
f79b468e 4160 &block->checksum,
5a1cc655 4161 1))
559be30c
TP
4162 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4163
4164 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4165 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4166 }
4167
4168 /* Set test active flag here so userspace doesn't interrupt things */
4169 intel_dp->compliance_test_active = 1;
4170
c5d5ab7a
TP
4171 return test_result;
4172}
4173
4174static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4175{
c5d5ab7a
TP
4176 uint8_t test_result = DP_TEST_NAK;
4177 return test_result;
4178}
4179
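/*
 * DP compliance test handling: clear the previous compliance state, read the
 * requested test from DP_TEST_REQUEST, dispatch to the link training, video
 * pattern, EDID read or PHY pattern handler above, and write the resulting
 * ACK/NAK back to DP_TEST_RESPONSE. Only the EDID test does real work here;
 * the other handlers are stubs that ACK or NAK unconditionally.
 */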
4180static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4181{
4182 uint8_t response = DP_TEST_NAK;
4183 uint8_t rxdata = 0;
4184 int status = 0;
4185
559be30c 4186 intel_dp->compliance_test_active = 0;
c5d5ab7a 4187 intel_dp->compliance_test_type = 0;
559be30c
TP
4188 intel_dp->compliance_test_data = 0;
4189
c5d5ab7a
TP
4190 intel_dp->aux.i2c_nack_count = 0;
4191 intel_dp->aux.i2c_defer_count = 0;
4192
4193 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4194 if (status <= 0) {
4195 DRM_DEBUG_KMS("Could not read test request from sink\n");
4196 goto update_status;
4197 }
4198
4199 switch (rxdata) {
4200 case DP_TEST_LINK_TRAINING:
4201 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4202 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4203 response = intel_dp_autotest_link_training(intel_dp);
4204 break;
4205 case DP_TEST_LINK_VIDEO_PATTERN:
4206 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4207 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4208 response = intel_dp_autotest_video_pattern(intel_dp);
4209 break;
4210 case DP_TEST_LINK_EDID_READ:
4211 DRM_DEBUG_KMS("EDID test requested\n");
4212 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4213 response = intel_dp_autotest_edid(intel_dp);
4214 break;
4215 case DP_TEST_LINK_PHY_TEST_PATTERN:
4216 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4217 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4218 response = intel_dp_autotest_phy_pattern(intel_dp);
4219 break;
4220 default:
4221 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4222 break;
4223 }
4224
4225update_status:
4226 status = drm_dp_dpcd_write(&intel_dp->aux,
4227 DP_TEST_RESPONSE,
4228 &response, 1);
4229 if (status <= 0)
4230 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4231}
4232
0e32b39c
DA
4233static int
4234intel_dp_check_mst_status(struct intel_dp *intel_dp)
4235{
4236 bool bret;
4237
4238 if (intel_dp->is_mst) {
4239 u8 esi[16] = { 0 };
4240 int ret = 0;
4241 int retry;
4242 bool handled;
4243 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4244go_again:
4245 if (bret == true) {
4246
4247 /* check link status - esi[10] = 0x200c */
4248 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4249 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4250 intel_dp_start_link_train(intel_dp);
4251 intel_dp_complete_link_train(intel_dp);
4252 intel_dp_stop_link_train(intel_dp);
4253 }
4254
6f34cc39 4255 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4256 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4257
4258 if (handled) {
4259 for (retry = 0; retry < 3; retry++) {
4260 int wret;
4261 wret = drm_dp_dpcd_write(&intel_dp->aux,
4262 DP_SINK_COUNT_ESI+1,
4263 &esi[1], 3);
4264 if (wret == 3) {
4265 break;
4266 }
4267 }
4268
4269 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4270 if (bret == true) {
6f34cc39 4271 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4272 goto go_again;
4273 }
4274 } else
4275 ret = 0;
4276
4277 return ret;
4278 } else {
4279 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4280 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4281 intel_dp->is_mst = false;
4282 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4283 /* send a hotplug event */
4284 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4285 }
4286 }
4287 return -EINVAL;
4288}
4289
a4fc5ed6
KP
4290/*
4291 * According to DP spec
4292 * 5.1.2:
4293 * 1. Read DPCD
4294 * 2. Configure link according to Receiver Capabilities
4295 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4296 * 4. Check link status on receipt of hot-plug interrupt
4297 */
a5146200 4298static void
ea5b213a 4299intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4300{
5b215bcf 4301 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4302 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4303 u8 sink_irq_vector;
93f62dad 4304 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4305
5b215bcf
DA
4306 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4307
e02f9a06 4308 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4309 return;
4310
1a125d8a
ID
4311 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4312 return;
4313
92fd8fd1 4314 /* Try to read receiver status if the link appears to be up */
93f62dad 4315 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4316 return;
4317 }
4318
92fd8fd1 4319 /* Now read the DPCD to see if it's actually running */
26d61aad 4320 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4321 return;
4322 }
4323
a60f0e38
JB
4324 /* Try to read the source of the interrupt */
4325 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4326 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4327 /* Clear interrupt source */
9d1a1031
JN
4328 drm_dp_dpcd_writeb(&intel_dp->aux,
4329 DP_DEVICE_SERVICE_IRQ_VECTOR,
4330 sink_irq_vector);
a60f0e38
JB
4331
4332 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4333 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4334 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4335 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4336 }
4337
1ffdff13 4338 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4339 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4340 intel_encoder->base.name);
33a34e4e
JB
4341 intel_dp_start_link_train(intel_dp);
4342 intel_dp_complete_link_train(intel_dp);
3ab9c637 4343 intel_dp_stop_link_train(intel_dp);
33a34e4e 4344 }
a4fc5ed6 4345}
a4fc5ed6 4346
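/*
 * Branch-device aware detection: after a fresh DPCD read, a sink with no
 * downstream port is reported connected directly. For HPD-capable branch
 * devices the SINK_COUNT field decides the answer; otherwise the DDC bus is
 * probed, and unreliable downstream port types (VGA, non-EDID) are reported
 * as unknown rather than connected.
 */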
caf9ab24 4347/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4348static enum drm_connector_status
26d61aad 4349intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4350{
caf9ab24 4351 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4352 uint8_t type;
4353
4354 if (!intel_dp_get_dpcd(intel_dp))
4355 return connector_status_disconnected;
4356
4357 /* if there's no downstream port, we're done */
4358 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4359 return connector_status_connected;
caf9ab24
AJ
4360
4361 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4362 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4363 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4364 uint8_t reg;
9d1a1031
JN
4365
4366 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4367 &reg, 1) < 0)
caf9ab24 4368 return connector_status_unknown;
9d1a1031 4369
23235177
AJ
4370 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4371 : connector_status_disconnected;
caf9ab24
AJ
4372 }
4373
4374 /* If no HPD, poke DDC gently */
0b99836f 4375 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4376 return connector_status_connected;
caf9ab24
AJ
4377
4378 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4379 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4380 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4381 if (type == DP_DS_PORT_TYPE_VGA ||
4382 type == DP_DS_PORT_TYPE_NON_EDID)
4383 return connector_status_unknown;
4384 } else {
4385 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4386 DP_DWN_STRM_PORT_TYPE_MASK;
4387 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4388 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4389 return connector_status_unknown;
4390 }
caf9ab24
AJ
4391
4392 /* Anything else is out of spec, warn and ignore */
4393 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4394 return connector_status_disconnected;
71ba9000
AJ
4395}
4396
d410b56d
CW
4397static enum drm_connector_status
4398edp_detect(struct intel_dp *intel_dp)
4399{
4400 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4401 enum drm_connector_status status;
4402
4403 status = intel_panel_detect(dev);
4404 if (status == connector_status_unknown)
4405 status = connector_status_connected;
4406
4407 return status;
4408}
4409
5eb08b69 4410static enum drm_connector_status
a9756bb5 4411ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4412{
30add22d 4413 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4414 struct drm_i915_private *dev_priv = dev->dev_private;
4415 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4416
1b469639
DL
4417 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4418 return connector_status_disconnected;
4419
26d61aad 4420 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4421}
4422
2a592bec
DA
4423static int g4x_digital_port_connected(struct drm_device *dev,
4424 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4425{
a4fc5ed6 4426 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4427 uint32_t bit;
5eb08b69 4428
232a6ee9
TP
4429 if (IS_VALLEYVIEW(dev)) {
4430 switch (intel_dig_port->port) {
4431 case PORT_B:
4432 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4433 break;
4434 case PORT_C:
4435 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4436 break;
4437 case PORT_D:
4438 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4439 break;
4440 default:
2a592bec 4441 return -EINVAL;
232a6ee9
TP
4442 }
4443 } else {
4444 switch (intel_dig_port->port) {
4445 case PORT_B:
4446 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4447 break;
4448 case PORT_C:
4449 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4450 break;
4451 case PORT_D:
4452 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4453 break;
4454 default:
2a592bec 4455 return -EINVAL;
232a6ee9 4456 }
a4fc5ed6
KP
4457 }
4458
10f76a38 4459 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4460 return 0;
4461 return 1;
4462}
4463
4464static enum drm_connector_status
4465g4x_dp_detect(struct intel_dp *intel_dp)
4466{
4467 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4468 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4469 int ret;
4470
4471 /* Can't disconnect eDP, but you can close the lid... */
4472 if (is_edp(intel_dp)) {
4473 enum drm_connector_status status;
4474
4475 status = intel_panel_detect(dev);
4476 if (status == connector_status_unknown)
4477 status = connector_status_connected;
4478 return status;
4479 }
4480
4481 ret = g4x_digital_port_connected(dev, intel_dig_port);
4482 if (ret == -EINVAL)
4483 return connector_status_unknown;
4484 else if (ret == 0)
a4fc5ed6
KP
4485 return connector_status_disconnected;
4486
26d61aad 4487 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4488}
4489
8c241fef 4490static struct edid *
beb60608 4491intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4492{
beb60608 4493 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4494
9cd300e0
JN
4495 /* use cached edid if we have one */
4496 if (intel_connector->edid) {
9cd300e0
JN
4497 /* invalid edid */
4498 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4499 return NULL;
4500
55e9edeb 4501 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4502 } else
4503 return drm_get_edid(&intel_connector->base,
4504 &intel_dp->aux.ddc);
4505}
8c241fef 4506
beb60608
CW
4507static void
4508intel_dp_set_edid(struct intel_dp *intel_dp)
4509{
4510 struct intel_connector *intel_connector = intel_dp->attached_connector;
4511 struct edid *edid;
8c241fef 4512
beb60608
CW
4513 edid = intel_dp_get_edid(intel_dp);
4514 intel_connector->detect_edid = edid;
4515
4516 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4517 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4518 else
4519 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4520}
4521
beb60608
CW
4522static void
4523intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4524{
beb60608 4525 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4526
beb60608
CW
4527 kfree(intel_connector->detect_edid);
4528 intel_connector->detect_edid = NULL;
9cd300e0 4529
beb60608
CW
4530 intel_dp->has_audio = false;
4531}
d6f24d0f 4532
beb60608
CW
4533static enum intel_display_power_domain
4534intel_dp_power_get(struct intel_dp *dp)
4535{
4536 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4537 enum intel_display_power_domain power_domain;
4538
4539 power_domain = intel_display_port_power_domain(encoder);
4540 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4541
4542 return power_domain;
4543}
d6f24d0f 4544
beb60608
CW
4545static void
4546intel_dp_power_put(struct intel_dp *dp,
4547 enum intel_display_power_domain power_domain)
4548{
4549 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4550 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4551}
4552
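/*
 * Full ->detect() path: drop any cached EDID, report MST-managed connectors
 * as disconnected from a monitor point of view, take a power domain
 * reference, run the platform hotplug check (eDP / PCH / g4x), probe the
 * sink OUI and MST capability, cache the EDID, service any pending
 * automated test request, then release the power reference.
 */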
a9756bb5
ZW
4553static enum drm_connector_status
4554intel_dp_detect(struct drm_connector *connector, bool force)
4555{
4556 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4558 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4559 struct drm_device *dev = connector->dev;
a9756bb5 4560 enum drm_connector_status status;
671dedd2 4561 enum intel_display_power_domain power_domain;
0e32b39c 4562 bool ret;
09b1eb13 4563 u8 sink_irq_vector;
a9756bb5 4564
164c8598 4565 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4566 connector->base.id, connector->name);
beb60608 4567 intel_dp_unset_edid(intel_dp);
164c8598 4568
0e32b39c
DA
4569 if (intel_dp->is_mst) {
4570 /* MST devices are disconnected from a monitor POV */
4571 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4572 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4573 return connector_status_disconnected;
0e32b39c
DA
4574 }
4575
beb60608 4576 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4577
d410b56d
CW
4578 /* Can't disconnect eDP, but you can close the lid... */
4579 if (is_edp(intel_dp))
4580 status = edp_detect(intel_dp);
4581 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4582 status = ironlake_dp_detect(intel_dp);
4583 else
4584 status = g4x_dp_detect(intel_dp);
4585 if (status != connector_status_connected)
c8c8fb33 4586 goto out;
a9756bb5 4587
0d198328
AJ
4588 intel_dp_probe_oui(intel_dp);
4589
0e32b39c
DA
4590 ret = intel_dp_probe_mst(intel_dp);
4591 if (ret) {
4592 /* if we are in MST mode then this connector
4593 * won't appear connected or have anything with EDID on it */
4594 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4595 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4596 status = connector_status_disconnected;
4597 goto out;
4598 }
4599
beb60608 4600 intel_dp_set_edid(intel_dp);
a9756bb5 4601
d63885da
PZ
4602 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4603 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4604 status = connector_status_connected;
4605
09b1eb13
TP
4606 /* Try to read the source of the interrupt */
4607 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4608 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4609 /* Clear interrupt source */
4610 drm_dp_dpcd_writeb(&intel_dp->aux,
4611 DP_DEVICE_SERVICE_IRQ_VECTOR,
4612 sink_irq_vector);
4613
4614 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4615 intel_dp_handle_test_request(intel_dp);
4616 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4617 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4618 }
4619
c8c8fb33 4620out:
beb60608 4621 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4622 return status;
a4fc5ed6
KP
4623}
4624
beb60608
CW
4625static void
4626intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4627{
df0e9248 4628 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4629 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4630 enum intel_display_power_domain power_domain;
a4fc5ed6 4631
beb60608
CW
4632 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4633 connector->base.id, connector->name);
4634 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4635
beb60608
CW
4636 if (connector->status != connector_status_connected)
4637 return;
671dedd2 4638
beb60608
CW
4639 power_domain = intel_dp_power_get(intel_dp);
4640
4641 intel_dp_set_edid(intel_dp);
4642
4643 intel_dp_power_put(intel_dp, power_domain);
4644
4645 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4646 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4647}
4648
4649static int intel_dp_get_modes(struct drm_connector *connector)
4650{
4651 struct intel_connector *intel_connector = to_intel_connector(connector);
4652 struct edid *edid;
4653
4654 edid = intel_connector->detect_edid;
4655 if (edid) {
4656 int ret = intel_connector_update_modes(connector, edid);
4657 if (ret)
4658 return ret;
4659 }
32f9d658 4660
f8779fda 4661 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4662 if (is_edp(intel_attached_dp(connector)) &&
4663 intel_connector->panel.fixed_mode) {
f8779fda 4664 struct drm_display_mode *mode;
beb60608
CW
4665
4666 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4667 intel_connector->panel.fixed_mode);
f8779fda 4668 if (mode) {
32f9d658
ZW
4669 drm_mode_probed_add(connector, mode);
4670 return 1;
4671 }
4672 }
beb60608 4673
32f9d658 4674 return 0;
a4fc5ed6
KP
4675}
4676
1aad7ac0
CW
4677static bool
4678intel_dp_detect_audio(struct drm_connector *connector)
4679{
1aad7ac0 4680 bool has_audio = false;
beb60608 4681 struct edid *edid;
1aad7ac0 4682
beb60608
CW
4683 edid = to_intel_connector(connector)->detect_edid;
4684 if (edid)
1aad7ac0 4685 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4686
1aad7ac0
CW
4687 return has_audio;
4688}
4689
f684960e
CW
4690static int
4691intel_dp_set_property(struct drm_connector *connector,
4692 struct drm_property *property,
4693 uint64_t val)
4694{
e953fd7b 4695 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4696 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4697 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4698 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4699 int ret;
4700
662595df 4701 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4702 if (ret)
4703 return ret;
4704
3f43c48d 4705 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4706 int i = val;
4707 bool has_audio;
4708
4709 if (i == intel_dp->force_audio)
f684960e
CW
4710 return 0;
4711
1aad7ac0 4712 intel_dp->force_audio = i;
f684960e 4713
c3e5f67b 4714 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4715 has_audio = intel_dp_detect_audio(connector);
4716 else
c3e5f67b 4717 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4718
4719 if (has_audio == intel_dp->has_audio)
f684960e
CW
4720 return 0;
4721
1aad7ac0 4722 intel_dp->has_audio = has_audio;
f684960e
CW
4723 goto done;
4724 }
4725
e953fd7b 4726 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4727 bool old_auto = intel_dp->color_range_auto;
4728 uint32_t old_range = intel_dp->color_range;
4729
55bc60db
VS
4730 switch (val) {
4731 case INTEL_BROADCAST_RGB_AUTO:
4732 intel_dp->color_range_auto = true;
4733 break;
4734 case INTEL_BROADCAST_RGB_FULL:
4735 intel_dp->color_range_auto = false;
4736 intel_dp->color_range = 0;
4737 break;
4738 case INTEL_BROADCAST_RGB_LIMITED:
4739 intel_dp->color_range_auto = false;
4740 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4741 break;
4742 default:
4743 return -EINVAL;
4744 }
ae4edb80
DV
4745
4746 if (old_auto == intel_dp->color_range_auto &&
4747 old_range == intel_dp->color_range)
4748 return 0;
4749
e953fd7b
CW
4750 goto done;
4751 }
4752
53b41837
YN
4753 if (is_edp(intel_dp) &&
4754 property == connector->dev->mode_config.scaling_mode_property) {
4755 if (val == DRM_MODE_SCALE_NONE) {
4756 DRM_DEBUG_KMS("no scaling not supported\n");
4757 return -EINVAL;
4758 }
4759
4760 if (intel_connector->panel.fitting_mode == val) {
4761 /* the eDP scaling property is not changed */
4762 return 0;
4763 }
4764 intel_connector->panel.fitting_mode = val;
4765
4766 goto done;
4767 }
4768
f684960e
CW
4769 return -EINVAL;
4770
4771done:
c0c36b94
CW
4772 if (intel_encoder->base.crtc)
4773 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4774
4775 return 0;
4776}
4777
a4fc5ed6 4778static void
73845adf 4779intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4780{
1d508706 4781 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4782
10e972d3 4783 kfree(intel_connector->detect_edid);
beb60608 4784
9cd300e0
JN
4785 if (!IS_ERR_OR_NULL(intel_connector->edid))
4786 kfree(intel_connector->edid);
4787
acd8db10
PZ
4788 /* Can't call is_edp() since the encoder may have been destroyed
4789 * already. */
4790 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4791 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4792
a4fc5ed6 4793 drm_connector_cleanup(connector);
55f78c43 4794 kfree(connector);
a4fc5ed6
KP
4795}
4796
00c09d70 4797void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4798{
da63a9f2
PZ
4799 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4800 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4801
4f71d0cb 4802 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4803 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4804 if (is_edp(intel_dp)) {
4805 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4806 /*
4808 * vdd might still be enabled due to the delayed vdd off.
4808 * Make sure vdd is actually turned off here.
4809 */
773538e8 4810 pps_lock(intel_dp);
4be73780 4811 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4812 pps_unlock(intel_dp);
4813
01527b31
CT
4814 if (intel_dp->edp_notifier.notifier_call) {
4815 unregister_reboot_notifier(&intel_dp->edp_notifier);
4816 intel_dp->edp_notifier.notifier_call = NULL;
4817 }
bd943159 4818 }
c8bd0e49 4819 drm_encoder_cleanup(encoder);
da63a9f2 4820 kfree(intel_dig_port);
24d05927
DV
4821}
4822
07f9cd0b
ID
4823static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4824{
4825 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4826
4827 if (!is_edp(intel_dp))
4828 return;
4829
951468f3
VS
4830 /*
4831 * vdd might still be enabled due to the delayed vdd off.
4832 * Make sure vdd is actually turned off here.
4833 */
afa4e53a 4834 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4835 pps_lock(intel_dp);
07f9cd0b 4836 edp_panel_vdd_off_sync(intel_dp);
773538e8 4837 pps_unlock(intel_dp);
07f9cd0b
ID
4838}
4839
49e6bc51
VS
4840static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4841{
4842 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4843 struct drm_device *dev = intel_dig_port->base.base.dev;
4844 struct drm_i915_private *dev_priv = dev->dev_private;
4845 enum intel_display_power_domain power_domain;
4846
4847 lockdep_assert_held(&dev_priv->pps_mutex);
4848
4849 if (!edp_have_panel_vdd(intel_dp))
4850 return;
4851
4852 /*
4853 * The VDD bit needs a power domain reference, so if the bit is
4854 * already enabled when we boot or resume, grab this reference and
4855 * schedule a vdd off, so we don't hold on to the reference
4856 * indefinitely.
4857 */
4858 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4859 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4860 intel_display_power_get(dev_priv, power_domain);
4861
4862 edp_panel_vdd_schedule_off(intel_dp);
4863}
4864
6d93c0c4
ID
4865static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4866{
49e6bc51
VS
4867 struct intel_dp *intel_dp;
4868
4869 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4870 return;
4871
4872 intel_dp = enc_to_intel_dp(encoder);
4873
4874 pps_lock(intel_dp);
4875
4876 /*
4877 * Read out the current power sequencer assignment,
4878 * in case the BIOS did something with it.
4879 */
4880 if (IS_VALLEYVIEW(encoder->dev))
4881 vlv_initial_power_sequencer_setup(intel_dp);
4882
4883 intel_edp_panel_vdd_sanitize(intel_dp);
4884
4885 pps_unlock(intel_dp);
6d93c0c4
ID
4886}
4887
a4fc5ed6 4888static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4889 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4890 .detect = intel_dp_detect,
beb60608 4891 .force = intel_dp_force,
a4fc5ed6 4892 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4893 .set_property = intel_dp_set_property,
2545e4a6 4894 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4895 .destroy = intel_dp_connector_destroy,
c6f95f27 4896 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4897 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4898};
4899
4900static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4901 .get_modes = intel_dp_get_modes,
4902 .mode_valid = intel_dp_mode_valid,
df0e9248 4903 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4904};
4905
a4fc5ed6 4906static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4907 .reset = intel_dp_encoder_reset,
24d05927 4908 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4909};
4910
b2c5c181 4911enum irqreturn
13cf5504
DA
4912intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4913{
4914 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4915 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4916 struct drm_device *dev = intel_dig_port->base.base.dev;
4917 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4918 enum intel_display_power_domain power_domain;
b2c5c181 4919 enum irqreturn ret = IRQ_NONE;
1c767b33 4920
0e32b39c
DA
4921 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4922 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4923
7a7f84cc
VS
4924 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4925 /*
4926 * vdd off can generate a long pulse on eDP which
4927 * would require vdd on to handle it, and thus we
4928 * would end up in an endless cycle of
4929 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4930 */
4931 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4932 port_name(intel_dig_port->port));
a8b3d52f 4933 return IRQ_HANDLED;
7a7f84cc
VS
4934 }
4935
26fbb774
VS
4936 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4937 port_name(intel_dig_port->port),
0e32b39c 4938 long_hpd ? "long" : "short");
13cf5504 4939
1c767b33
ID
4940 power_domain = intel_display_port_power_domain(intel_encoder);
4941 intel_display_power_get(dev_priv, power_domain);
4942
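	/*
	 * A long pulse means the sink may have been (un)plugged: verify the
	 * port is still connected, re-read the DPCD, and re-probe the OUI and
	 * MST capability. A short pulse only requires servicing MST sideband
	 * messages, or re-checking the SST link status.
	 */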
0e32b39c 4943 if (long_hpd) {
5fa836a9
MK
4944 /* indicate that we need to restart link training */
4945 intel_dp->train_set_valid = false;
2a592bec
DA
4946
4947 if (HAS_PCH_SPLIT(dev)) {
4948 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4949 goto mst_fail;
4950 } else {
4951 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4952 goto mst_fail;
4953 }
0e32b39c
DA
4954
4955 if (!intel_dp_get_dpcd(intel_dp)) {
4956 goto mst_fail;
4957 }
4958
4959 intel_dp_probe_oui(intel_dp);
4960
4961 if (!intel_dp_probe_mst(intel_dp))
4962 goto mst_fail;
4963
4964 } else {
4965 if (intel_dp->is_mst) {
1c767b33 4966 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4967 goto mst_fail;
4968 }
4969
4970 if (!intel_dp->is_mst) {
4971 /*
4972 * we'll check the link status via the normal hot plug path later -
4973 * but for short hpds we should check it now
4974 */
5b215bcf 4975 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4976 intel_dp_check_link_status(intel_dp);
5b215bcf 4977 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4978 }
4979 }
b2c5c181
DV
4980
4981 ret = IRQ_HANDLED;
4982
1c767b33 4983 goto put_power;
0e32b39c
DA
4984mst_fail:
4985 /* if we were in MST mode, and the device is not there, get out of MST mode */
4986 if (intel_dp->is_mst) {
4987 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4988 intel_dp->is_mst = false;
4989 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4990 }
1c767b33
ID
4991put_power:
4992 intel_display_power_put(dev_priv, power_domain);
4993
4994 return ret;
13cf5504
DA
4995}
4996
e3421a18
ZW
4997/* Return which DP Port should be selected for Transcoder DP control */
4998int
0206e353 4999intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5000{
5001 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5002 struct intel_encoder *intel_encoder;
5003 struct intel_dp *intel_dp;
e3421a18 5004
fa90ecef
PZ
5005 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5006 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5007
fa90ecef
PZ
5008 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5009 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5010 return intel_dp->output_reg;
e3421a18 5011 }
ea5b213a 5012
e3421a18
ZW
5013 return -1;
5014}
5015
36e83a18 5016/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5017bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5018{
5019 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5020 union child_device_config *p_child;
36e83a18 5021 int i;
5d8a7752
VS
5022 static const short port_mapping[] = {
5023 [PORT_B] = PORT_IDPB,
5024 [PORT_C] = PORT_IDPC,
5025 [PORT_D] = PORT_IDPD,
5026 };
36e83a18 5027
3b32a35b
VS
5028 if (port == PORT_A)
5029 return true;
5030
41aa3448 5031 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5032 return false;
5033
41aa3448
RV
5034 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5035 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5036
5d8a7752 5037 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5038 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5039 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5040 return true;
5041 }
5042 return false;
5043}
5044
0e32b39c 5045void
f684960e
CW
5046intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5047{
53b41837
YN
5048 struct intel_connector *intel_connector = to_intel_connector(connector);
5049
3f43c48d 5050 intel_attach_force_audio_property(connector);
e953fd7b 5051 intel_attach_broadcast_rgb_property(connector);
55bc60db 5052 intel_dp->color_range_auto = true;
53b41837
YN
5053
5054 if (is_edp(intel_dp)) {
5055 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5056 drm_object_attach_property(
5057 &connector->base,
53b41837 5058 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5059 DRM_MODE_SCALE_ASPECT);
5060 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5061 }
f684960e
CW
5062}
5063
dada1a9f
ID
5064static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5065{
5066 intel_dp->last_power_cycle = jiffies;
5067 intel_dp->last_power_on = jiffies;
5068 intel_dp->last_backlight_off = jiffies;
5069}
5070
67a54566
DV
5071static void
5072intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5073 struct intel_dp *intel_dp)
67a54566
DV
5074{
5075 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5076 struct edp_power_seq cur, vbt, spec,
5077 *final = &intel_dp->pps_delays;
b0a08bec
VK
5078 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5079 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5080
e39b999a
VS
5081 lockdep_assert_held(&dev_priv->pps_mutex);
5082
81ddbc69
VS
5083 /* already initialized? */
5084 if (final->t11_t12 != 0)
5085 return;
5086
b0a08bec
VK
5087 if (IS_BROXTON(dev)) {
5088 /*
5089 * TODO: BXT has 2 sets of PPS registers.
5090 * The correct register for Broxton needs to be identified
5091 * using VBT; hardcoding for now.
5092 */
5093 pp_ctrl_reg = BXT_PP_CONTROL(0);
5094 pp_on_reg = BXT_PP_ON_DELAYS(0);
5095 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5096 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5097 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5098 pp_on_reg = PCH_PP_ON_DELAYS;
5099 pp_off_reg = PCH_PP_OFF_DELAYS;
5100 pp_div_reg = PCH_PP_DIVISOR;
5101 } else {
bf13e81b
JN
5102 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5103
5104 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5105 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5106 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5107 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5108 }
67a54566
DV
5109
5110 /* Workaround: Need to write PP_CONTROL with the unlock key as
5111 * the very first thing. */
b0a08bec 5112 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5113
453c5420
JB
5114 pp_on = I915_READ(pp_on_reg);
5115 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5116 if (!IS_BROXTON(dev)) {
5117 I915_WRITE(pp_ctrl_reg, pp_ctl);
5118 pp_div = I915_READ(pp_div_reg);
5119 }
67a54566
DV
5120
5121 /* Pull timing values out of registers */
5122 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5123 PANEL_POWER_UP_DELAY_SHIFT;
5124
5125 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5126 PANEL_LIGHT_ON_DELAY_SHIFT;
5127
5128 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5129 PANEL_LIGHT_OFF_DELAY_SHIFT;
5130
5131 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5132 PANEL_POWER_DOWN_DELAY_SHIFT;
5133
b0a08bec
VK
5134 if (IS_BROXTON(dev)) {
5135 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5136 BXT_POWER_CYCLE_DELAY_SHIFT;
5137 if (tmp > 0)
5138 cur.t11_t12 = (tmp - 1) * 1000;
5139 else
5140 cur.t11_t12 = 0;
5141 } else {
5142 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5143 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5144 }
67a54566
DV
5145
5146 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5147 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5148
41aa3448 5149 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5150
5151 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5152 * our hw here, which are all in 100usec. */
5153 spec.t1_t3 = 210 * 10;
5154 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5155 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5156 spec.t10 = 500 * 10;
5157 /* This one is special and actually in units of 100ms, but zero-
5158 * based in the hw (so we need to add 100 ms). But the sw vbt
5159 * table multiplies it by 1000 to make it in units of 100usec,
5160 * too. */
5161 spec.t11_t12 = (510 + 100) * 10;
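	/*
	 * Worked example of the units used above: t1_t3 = 210 * 10 = 2100 is
	 * 2100 * 100us = 210ms, and t11_t12 = (510 + 100) * 10 = 6100 is
	 * 610ms (the extra 100ms accounts for the zero-based hw field).
	 */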
5162
5163 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5164 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5165
5166 /* Use the max of the register settings and vbt. If both are
5167 * unset, fall back to the spec limits. */
36b5f425 5168#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5169 spec.field : \
5170 max(cur.field, vbt.field))
5171 assign_final(t1_t3);
5172 assign_final(t8);
5173 assign_final(t9);
5174 assign_final(t10);
5175 assign_final(t11_t12);
5176#undef assign_final
5177
36b5f425 5178#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5179 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5180 intel_dp->backlight_on_delay = get_delay(t8);
5181 intel_dp->backlight_off_delay = get_delay(t9);
5182 intel_dp->panel_power_down_delay = get_delay(t10);
5183 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5184#undef get_delay
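	/*
	 * get_delay() converts the 100us-based values into milliseconds,
	 * rounding up: e.g. a final t1_t3 of 2100 (210ms expressed in 100us
	 * units) yields a panel_power_up_delay of 210.
	 */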
5185
f30d26e4
JN
5186 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5187 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5188 intel_dp->panel_power_cycle_delay);
5189
5190 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5191 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5192}
5193
5194static void
5195intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5196 struct intel_dp *intel_dp)
f30d26e4
JN
5197{
5198 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5199 u32 pp_on, pp_off, pp_div, port_sel = 0;
5200 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5201 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5202 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5203 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5204
e39b999a 5205 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5206
b0a08bec
VK
5207 if (IS_BROXTON(dev)) {
5208 /*
5209 * TODO: BXT has 2 sets of PPS registers.
5210 * The correct register for Broxton needs to be identified
5211 * using VBT; hardcoding for now.
5212 */
5213 pp_ctrl_reg = BXT_PP_CONTROL(0);
5214 pp_on_reg = BXT_PP_ON_DELAYS(0);
5215 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5216
5217 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5218 pp_on_reg = PCH_PP_ON_DELAYS;
5219 pp_off_reg = PCH_PP_OFF_DELAYS;
5220 pp_div_reg = PCH_PP_DIVISOR;
5221 } else {
bf13e81b
JN
5222 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5223
5224 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5225 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5226 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5227 }
5228
b2f19d1a
PZ
5229 /*
5230 * And finally store the new values in the power sequencer. The
5231 * backlight delays are set to 1 because we do manual waits on them. For
5232 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5233 * we'll end up waiting for the backlight off delay twice: once when we
5234 * do the manual sleep, and once when we disable the panel and wait for
5235 * the PP_STATUS bit to become zero.
5236 */
f30d26e4 5237 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5238 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5239 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5240 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5241 /* Compute the divisor for the pp clock, simply match the Bspec
5242 * formula. */
b0a08bec
VK
5243 if (IS_BROXTON(dev)) {
5244 pp_div = I915_READ(pp_ctrl_reg);
5245 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5246 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5247 << BXT_POWER_CYCLE_DELAY_SHIFT);
5248 } else {
5249 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5250 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5251 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5252 }
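	/*
	 * Sketch of the non-BXT math above, assuming (purely as an example)
	 * a 125 MHz raw clock, i.e. div = 125: the reference divider field
	 * becomes (100 * 125) / 2 - 1 = 6249, and a t11_t12 of 6100 (in
	 * 100us units) gives a power cycle delay field of
	 * DIV_ROUND_UP(6100, 1000) = 7.
	 */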
67a54566
DV
5253
5254 /* Haswell doesn't have any port selection bits for the panel
5255 * power sequencer any more. */
bc7d38a4 5256 if (IS_VALLEYVIEW(dev)) {
ad933b56 5257 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5258 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5259 if (port == PORT_A)
a24c144c 5260 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5261 else
a24c144c 5262 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5263 }
5264
453c5420
JB
5265 pp_on |= port_sel;
5266
5267 I915_WRITE(pp_on_reg, pp_on);
5268 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5269 if (IS_BROXTON(dev))
5270 I915_WRITE(pp_ctrl_reg, pp_div);
5271 else
5272 I915_WRITE(pp_div_reg, pp_div);
67a54566 5273
67a54566 5274 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5275 I915_READ(pp_on_reg),
5276 I915_READ(pp_off_reg),
b0a08bec
VK
5277 IS_BROXTON(dev) ?
5278 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5279 I915_READ(pp_div_reg));
f684960e
CW
5280}
5281
b33a2815
VK
5282/**
5283 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5284 * @dev: DRM device
5285 * @refresh_rate: RR to be programmed
5286 *
5287 * This function gets called when refresh rate (RR) has to be changed from
5288 * one frequency to another. Switches can be between high and low RR
5289 * supported by the panel or to any other RR based on media playback (in
5290 * this case, RR value needs to be passed from user space).
5291 *
5292 * The caller of this function needs to take a lock on dev_priv->drrs.
5293 */
96178eeb 5294static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5295{
5296 struct drm_i915_private *dev_priv = dev->dev_private;
5297 struct intel_encoder *encoder;
96178eeb
VK
5298 struct intel_digital_port *dig_port = NULL;
5299 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5300 struct intel_crtc_state *config = NULL;
439d7ac0 5301 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5302 u32 reg, val;
96178eeb 5303 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5304
5305 if (refresh_rate <= 0) {
5306 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5307 return;
5308 }
5309
96178eeb
VK
5310 if (intel_dp == NULL) {
5311 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5312 return;
5313 }
5314
1fcc9d1c 5315 /*
e4d59f6b
RV
5316 * FIXME: This needs proper synchronization with psr state for some
5317 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5318 */
439d7ac0 5319
96178eeb
VK
5320 dig_port = dp_to_dig_port(intel_dp);
5321 encoder = &dig_port->base;
723f9aab 5322 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5323
5324 if (!intel_crtc) {
5325 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5326 return;
5327 }
5328
6e3c9717 5329 config = intel_crtc->config;
439d7ac0 5330
96178eeb 5331 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5332 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5333 return;
5334 }
5335
96178eeb
VK
5336 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5337 refresh_rate)
439d7ac0
PB
5338 index = DRRS_LOW_RR;
5339
96178eeb 5340 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5341 DRM_DEBUG_KMS(
5342 "DRRS requested for previously set RR...ignoring\n");
5343 return;
5344 }
5345
5346 if (!intel_crtc->active) {
5347 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5348 return;
5349 }
5350
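	/*
	 * Gen8+ (excluding CHV) switches the refresh rate by reprogramming
	 * the link M/N values (M1_N1 for the high RR, M2_N2 for the low RR);
	 * older gens (> gen6) instead toggle the EDP_RR_MODE_SWITCH bit in
	 * PIPECONF (or its VLV variant).
	 */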
44395bfe 5351 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5352 switch (index) {
5353 case DRRS_HIGH_RR:
5354 intel_dp_set_m_n(intel_crtc, M1_N1);
5355 break;
5356 case DRRS_LOW_RR:
5357 intel_dp_set_m_n(intel_crtc, M2_N2);
5358 break;
5359 case DRRS_MAX_RR:
5360 default:
5361 DRM_ERROR("Unsupported refreshrate type\n");
5362 }
5363 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5364 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5365 val = I915_READ(reg);
a4c30b1d 5366
439d7ac0 5367 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5368 if (IS_VALLEYVIEW(dev))
5369 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5370 else
5371 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5372 } else {
6fa7aec1
VK
5373 if (IS_VALLEYVIEW(dev))
5374 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5375 else
5376 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5377 }
5378 I915_WRITE(reg, val);
5379 }
5380
4e9ac947
VK
5381 dev_priv->drrs.refresh_rate_type = index;
5382
5383 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5384}
5385
b33a2815
VK
5386/**
5387 * intel_edp_drrs_enable - init drrs struct if supported
5388 * @intel_dp: DP struct
5389 *
5390 * Initializes frontbuffer_bits and drrs.dp
5391 */
c395578e
VK
5392void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5393{
5394 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5395 struct drm_i915_private *dev_priv = dev->dev_private;
5396 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5397 struct drm_crtc *crtc = dig_port->base.base.crtc;
5398 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5399
5400 if (!intel_crtc->config->has_drrs) {
5401 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5402 return;
5403 }
5404
5405 mutex_lock(&dev_priv->drrs.mutex);
5406 if (WARN_ON(dev_priv->drrs.dp)) {
5407 DRM_ERROR("DRRS already enabled\n");
5408 goto unlock;
5409 }
5410
5411 dev_priv->drrs.busy_frontbuffer_bits = 0;
5412
5413 dev_priv->drrs.dp = intel_dp;
5414
5415unlock:
5416 mutex_unlock(&dev_priv->drrs.mutex);
5417}
5418
b33a2815
VK
5419/**
5420 * intel_edp_drrs_disable - Disable DRRS
5421 * @intel_dp: DP struct
5422 *
5423 */
c395578e
VK
5424void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5425{
5426 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5427 struct drm_i915_private *dev_priv = dev->dev_private;
5428 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5429 struct drm_crtc *crtc = dig_port->base.base.crtc;
5430 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5431
5432 if (!intel_crtc->config->has_drrs)
5433 return;
5434
5435 mutex_lock(&dev_priv->drrs.mutex);
5436 if (!dev_priv->drrs.dp) {
5437 mutex_unlock(&dev_priv->drrs.mutex);
5438 return;
5439 }
5440
5441 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5442 intel_dp_set_drrs_state(dev_priv->dev,
5443 intel_dp->attached_connector->panel.
5444 fixed_mode->vrefresh);
5445
5446 dev_priv->drrs.dp = NULL;
5447 mutex_unlock(&dev_priv->drrs.mutex);
5448
5449 cancel_delayed_work_sync(&dev_priv->drrs.work);
5450}
5451
4e9ac947
VK
5452static void intel_edp_drrs_downclock_work(struct work_struct *work)
5453{
5454 struct drm_i915_private *dev_priv =
5455 container_of(work, typeof(*dev_priv), drrs.work.work);
5456 struct intel_dp *intel_dp;
5457
5458 mutex_lock(&dev_priv->drrs.mutex);
5459
5460 intel_dp = dev_priv->drrs.dp;
5461
5462 if (!intel_dp)
5463 goto unlock;
5464
439d7ac0 5465 /*
4e9ac947
VK
5466 * The delayed work can race with an invalidate hence we need to
5467 * recheck.
439d7ac0
PB
5468 */
5469
4e9ac947
VK
5470 if (dev_priv->drrs.busy_frontbuffer_bits)
5471 goto unlock;
439d7ac0 5472
4e9ac947
VK
5473 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5474 intel_dp_set_drrs_state(dev_priv->dev,
5475 intel_dp->attached_connector->panel.
5476 downclock_mode->vrefresh);
439d7ac0 5477
4e9ac947 5478unlock:
4e9ac947 5479 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5480}
5481
b33a2815 5482/**
0ddfd203 5483 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5484 * @dev: DRM device
5485 * @frontbuffer_bits: frontbuffer plane tracking bits
5486 *
0ddfd203
R
5487 * This function gets called every time rendering on the given planes starts.
5488 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5489 *
5490 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5491 */
a93fad0f
VK
5492void intel_edp_drrs_invalidate(struct drm_device *dev,
5493 unsigned frontbuffer_bits)
5494{
5495 struct drm_i915_private *dev_priv = dev->dev_private;
5496 struct drm_crtc *crtc;
5497 enum pipe pipe;
5498
9da7d693 5499 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5500 return;
5501
88f933a8 5502 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5503
a93fad0f 5504 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5505 if (!dev_priv->drrs.dp) {
5506 mutex_unlock(&dev_priv->drrs.mutex);
5507 return;
5508 }
5509
a93fad0f
VK
5510 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5511 pipe = to_intel_crtc(crtc)->pipe;
5512
c1d038c6
DV
5513 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5514 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5515
0ddfd203 5516 /* invalidate means busy screen hence upclock */
c1d038c6 5517 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5518 intel_dp_set_drrs_state(dev_priv->dev,
5519 dev_priv->drrs.dp->attached_connector->panel.
5520 fixed_mode->vrefresh);
a93fad0f 5521
a93fad0f
VK
5522 mutex_unlock(&dev_priv->drrs.mutex);
5523}
5524
b33a2815 5525/**
0ddfd203 5526 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5527 * @dev: DRM device
5528 * @frontbuffer_bits: frontbuffer plane tracking bits
5529 *
0ddfd203
R
5530 * This function gets called every time rendering on the given planes has
5531 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5532 * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
5533 * if no other planes are dirty.
b33a2815
VK
5534 *
5535 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5536 */
a93fad0f
VK
5537void intel_edp_drrs_flush(struct drm_device *dev,
5538 unsigned frontbuffer_bits)
5539{
5540 struct drm_i915_private *dev_priv = dev->dev_private;
5541 struct drm_crtc *crtc;
5542 enum pipe pipe;
5543
9da7d693 5544 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5545 return;
5546
88f933a8 5547 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5548
a93fad0f 5549 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5550 if (!dev_priv->drrs.dp) {
5551 mutex_unlock(&dev_priv->drrs.mutex);
5552 return;
5553 }
5554
a93fad0f
VK
5555 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5556 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5557
5558 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5559 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5560
0ddfd203 5561 /* flush means busy screen hence upclock */
c1d038c6 5562 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5563 intel_dp_set_drrs_state(dev_priv->dev,
5564 dev_priv->drrs.dp->attached_connector->panel.
5565 fixed_mode->vrefresh);
5566
5567 /*
5568 * flush also means no more activity hence schedule downclock, if all
5569 * other fbs are quiescent too
5570 */
5571 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5572 schedule_delayed_work(&dev_priv->drrs.work,
5573 msecs_to_jiffies(1000));
5574 mutex_unlock(&dev_priv->drrs.mutex);
5575}
5576
b33a2815
VK
5577/**
5578 * DOC: Display Refresh Rate Switching (DRRS)
5579 *
5580 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5581 * which enables switching between low and high refresh rates,
5582 * dynamically, based on the usage scenario. This feature is applicable
5583 * for internal panels.
5584 *
5585 * Indication that the panel supports DRRS is given by the panel EDID, which
5586 * would list multiple refresh rates for one resolution.
5587 *
5588 * DRRS is of 2 types - static and seamless.
5589 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5590 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5591 * Seamless DRRS involves changing RR without any visual effect to the user
5592 * and can be used during normal system usage. This is done by programming
5593 * certain registers.
5594 *
5595 * Support for static/seamless DRRS may be indicated in the VBT based on
5596 * inputs from the panel spec.
5597 *
5598 * DRRS saves power by switching to low RR based on usage scenarios.
5599 *
5600 * eDP DRRS:-
5601 * The implementation is based on frontbuffer tracking.
5602 * When there is a disturbance on the screen triggered by user activity or a
5603 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5604 * When there is no movement on screen, after a timeout of 1 second, a switch
5605 * to low RR is made.
5606 * For integration with frontbuffer tracking code,
5607 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5608 *
5609 * DRRS can be further extended to support other internal panels and also
5610 * the scenario of video playback wherein RR is set based on the rate
5611 * requested by userspace.
5612 */
5613
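/*
 * A minimal sketch of the eDP DRRS flow described above, assuming a caller
 * wired up like the frontbuffer tracking code (the sequence below is
 * illustrative, not literal driver code):
 *
 *	intel_edp_drrs_invalidate(dev, bits);  // rendering starts -> force high RR
 *	// ... rendering / flip completes ...
 *	intel_edp_drrs_flush(dev, bits);       // upclock and arm the 1s idle work
 *	// after ~1s with no busy frontbuffer bits, the delayed work switches
 *	// the panel back to the low refresh rate.
 */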
5614/**
5615 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5616 * @intel_connector: eDP connector
5617 * @fixed_mode: preferred mode of panel
5618 *
5619 * This function is called only once at driver load to initialize basic
5620 * DRRS stuff.
5621 *
5622 * Returns:
5623 * Downclock mode if panel supports it, else return NULL.
5624 * DRRS support is determined by the presence of downclock mode (apart
5625 * from VBT setting).
5626 */
4f9db5b5 5627static struct drm_display_mode *
96178eeb
VK
5628intel_dp_drrs_init(struct intel_connector *intel_connector,
5629 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5630{
5631 struct drm_connector *connector = &intel_connector->base;
96178eeb 5632 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5633 struct drm_i915_private *dev_priv = dev->dev_private;
5634 struct drm_display_mode *downclock_mode = NULL;
5635
9da7d693
DV
5636 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5637 mutex_init(&dev_priv->drrs.mutex);
5638
4f9db5b5
PB
5639 if (INTEL_INFO(dev)->gen <= 6) {
5640 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5641 return NULL;
5642 }
5643
5644 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5645 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5646 return NULL;
5647 }
5648
5649 downclock_mode = intel_find_panel_downclock
5650 (dev, fixed_mode, connector);
5651
5652 if (!downclock_mode) {
a1d26342 5653 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5654 return NULL;
5655 }
5656
96178eeb 5657 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5658
96178eeb 5659 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5660 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5661 return downclock_mode;
5662}
5663
ed92f0b2 5664static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5665 struct intel_connector *intel_connector)
ed92f0b2
PZ
5666{
5667 struct drm_connector *connector = &intel_connector->base;
5668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5669 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5670 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5671 struct drm_i915_private *dev_priv = dev->dev_private;
5672 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5673 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5674 bool has_dpcd;
5675 struct drm_display_mode *scan;
5676 struct edid *edid;
6517d273 5677 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5678
5679 if (!is_edp(intel_dp))
5680 return true;
5681
49e6bc51
VS
5682 pps_lock(intel_dp);
5683 intel_edp_panel_vdd_sanitize(intel_dp);
5684 pps_unlock(intel_dp);
63635217 5685
ed92f0b2 5686 /* Cache DPCD and EDID for edp. */
ed92f0b2 5687 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5688
5689 if (has_dpcd) {
5690 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5691 dev_priv->no_aux_handshake =
5692 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5693 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5694 } else {
5695 /* if this fails, presume the device is a ghost */
5696 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5697 return false;
5698 }
5699
5700 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5701 pps_lock(intel_dp);
36b5f425 5702 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5703 pps_unlock(intel_dp);
ed92f0b2 5704
060c8778 5705 mutex_lock(&dev->mode_config.mutex);
0b99836f 5706 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5707 if (edid) {
5708 if (drm_add_edid_modes(connector, edid)) {
5709 drm_mode_connector_update_edid_property(connector,
5710 edid);
5711 drm_edid_to_eld(connector, edid);
5712 } else {
5713 kfree(edid);
5714 edid = ERR_PTR(-EINVAL);
5715 }
5716 } else {
5717 edid = ERR_PTR(-ENOENT);
5718 }
5719 intel_connector->edid = edid;
5720
5721 /* prefer fixed mode from EDID if available */
5722 list_for_each_entry(scan, &connector->probed_modes, head) {
5723 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5724 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5725 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5726 intel_connector, fixed_mode);
ed92f0b2
PZ
5727 break;
5728 }
5729 }
5730
5731 /* fallback to VBT if available for eDP */
5732 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5733 fixed_mode = drm_mode_duplicate(dev,
5734 dev_priv->vbt.lfp_lvds_vbt_mode);
5735 if (fixed_mode)
5736 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5737 }
060c8778 5738 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5739
01527b31
CT
5740 if (IS_VALLEYVIEW(dev)) {
5741 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5742 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5743
5744 /*
5745 * Figure out the current pipe for the initial backlight setup.
5746 * If the current pipe isn't valid, try the PPS pipe, and if that
5747 * fails just assume pipe A.
5748 */
5749 if (IS_CHERRYVIEW(dev))
5750 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5751 else
5752 pipe = PORT_TO_PIPE(intel_dp->DP);
5753
5754 if (pipe != PIPE_A && pipe != PIPE_B)
5755 pipe = intel_dp->pps_pipe;
5756
5757 if (pipe != PIPE_A && pipe != PIPE_B)
5758 pipe = PIPE_A;
5759
5760 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5761 pipe_name(pipe));
01527b31
CT
5762 }
5763
4f9db5b5 5764 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5765 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5766 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5767
5768 return true;
5769}
5770
16c25533 5771bool
f0fec3f2
PZ
5772intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5773 struct intel_connector *intel_connector)
a4fc5ed6 5774{
f0fec3f2
PZ
5775 struct drm_connector *connector = &intel_connector->base;
5776 struct intel_dp *intel_dp = &intel_dig_port->dp;
5777 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5778 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5779 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5780 enum port port = intel_dig_port->port;
0b99836f 5781 int type;
a4fc5ed6 5782
a4a5d2f8
VS
5783 intel_dp->pps_pipe = INVALID_PIPE;
5784
ec5b01dd 5785 /* intel_dp vfuncs */
b6b5e383
DL
5786 if (INTEL_INFO(dev)->gen >= 9)
5787 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5788 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5789 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5790 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5791 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5792 else if (HAS_PCH_SPLIT(dev))
5793 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5794 else
5795 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5796
b9ca5fad
DL
5797 if (INTEL_INFO(dev)->gen >= 9)
5798 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5799 else
5800 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5801
0767935e
DV
5802 /* Preserve the current hw state. */
5803 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5804 intel_dp->attached_connector = intel_connector;
3d3dc149 5805
3b32a35b 5806 if (intel_dp_is_edp(dev, port))
b329530c 5807 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5808 else
5809 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5810
f7d24902
ID
5811 /*
5812 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5813 * for DP the encoder type can be set by the caller to
5814 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5815 */
5816 if (type == DRM_MODE_CONNECTOR_eDP)
5817 intel_encoder->type = INTEL_OUTPUT_EDP;
5818
c17ed5b5
VS
5819 /* eDP only on port B and/or C on vlv/chv */
5820 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5821 port != PORT_B && port != PORT_C))
5822 return false;
5823
e7281eab
ID
5824 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5825 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5826 port_name(port));
5827
b329530c 5828 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5829 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5830
a4fc5ed6
KP
5831 connector->interlace_allowed = true;
5832 connector->doublescan_allowed = 0;
5833
f0fec3f2 5834 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5835 edp_panel_vdd_work);
a4fc5ed6 5836
df0e9248 5837 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5838 drm_connector_register(connector);
a4fc5ed6 5839
affa9354 5840 if (HAS_DDI(dev))
bcbc889b
PZ
5841 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5842 else
5843 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5844 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5845
0b99836f 5846 /* Set up the hotplug pin. */
ab9d7c30
PZ
5847 switch (port) {
5848 case PORT_A:
1d843f9d 5849 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5850 break;
5851 case PORT_B:
1d843f9d 5852 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5853 break;
5854 case PORT_C:
1d843f9d 5855 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5856 break;
5857 case PORT_D:
1d843f9d 5858 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5859 break;
5860 default:
ad1c0b19 5861 BUG();
5eb08b69
ZW
5862 }
5863
dada1a9f 5864 if (is_edp(intel_dp)) {
773538e8 5865 pps_lock(intel_dp);
1e74a324
VS
5866 intel_dp_init_panel_power_timestamps(intel_dp);
5867 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5868 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5869 else
36b5f425 5870 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5871 pps_unlock(intel_dp);
dada1a9f 5872 }
0095e6dc 5873
9d1a1031 5874 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5875
0e32b39c 5876 /* init MST on ports that can support it */
0c9b3715
JN
5877 if (HAS_DP_MST(dev) &&
5878 (port == PORT_B || port == PORT_C || port == PORT_D))
5879 intel_dp_mst_encoder_init(intel_dig_port,
5880 intel_connector->base.base.id);
0e32b39c 5881
36b5f425 5882 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5883 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5884 if (is_edp(intel_dp)) {
5885 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5886 /*
5887 * vdd might still be enabled due to the delayed vdd off.
5888 * Make sure vdd is actually turned off here.
5889 */
773538e8 5890 pps_lock(intel_dp);
4be73780 5891 edp_panel_vdd_off_sync(intel_dp);
773538e8 5892 pps_unlock(intel_dp);
15b1d171 5893 }
34ea3d38 5894 drm_connector_unregister(connector);
b2f246a8 5895 drm_connector_cleanup(connector);
16c25533 5896 return false;
b2f246a8 5897 }
32f9d658 5898
f684960e
CW
5899 intel_dp_add_properties(intel_dp, connector);
5900
a4fc5ed6
KP
5901 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5902 * 0xd. Failure to do so will result in spurious interrupts being
5903 * generated on the port when a cable is not attached.
5904 */
5905 if (IS_G4X(dev) && !IS_GM45(dev)) {
5906 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5907 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5908 }
16c25533 5909
aa7471d2
JN
5910 i915_debugfs_connector_add(connector);
5911
16c25533 5912 return true;
a4fc5ed6 5913}
f0fec3f2
PZ
5914
5915void
5916intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5917{
13cf5504 5918 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5919 struct intel_digital_port *intel_dig_port;
5920 struct intel_encoder *intel_encoder;
5921 struct drm_encoder *encoder;
5922 struct intel_connector *intel_connector;
5923
b14c5679 5924 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5925 if (!intel_dig_port)
5926 return;
5927
08d9bc92 5928 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5929 if (!intel_connector) {
5930 kfree(intel_dig_port);
5931 return;
5932 }
5933
5934 intel_encoder = &intel_dig_port->base;
5935 encoder = &intel_encoder->base;
5936
5937 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5938 DRM_MODE_ENCODER_TMDS);
5939
5bfe2ac0 5940 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5941 intel_encoder->disable = intel_disable_dp;
00c09d70 5942 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5943 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5944 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5945 if (IS_CHERRYVIEW(dev)) {
9197c88b 5946 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5947 intel_encoder->pre_enable = chv_pre_enable_dp;
5948 intel_encoder->enable = vlv_enable_dp;
580d3811 5949 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5950 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5951 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5952 intel_encoder->pre_enable = vlv_pre_enable_dp;
5953 intel_encoder->enable = vlv_enable_dp;
49277c31 5954 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5955 } else {
ecff4f3b
JN
5956 intel_encoder->pre_enable = g4x_pre_enable_dp;
5957 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5958 if (INTEL_INFO(dev)->gen >= 5)
5959 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5960 }
f0fec3f2 5961
174edf1f 5962 intel_dig_port->port = port;
f0fec3f2
PZ
5963 intel_dig_port->dp.output_reg = output_reg;
5964
00c09d70 5965 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5966 if (IS_CHERRYVIEW(dev)) {
5967 if (port == PORT_D)
5968 intel_encoder->crtc_mask = 1 << 2;
5969 else
5970 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5971 } else {
5972 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5973 }
bc079e8b 5974 intel_encoder->cloneable = 0;
f0fec3f2 5975
13cf5504 5976 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 5977 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 5978
15b1d171
PZ
5979 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5980 drm_encoder_cleanup(encoder);
5981 kfree(intel_dig_port);
b2f246a8 5982 kfree(intel_connector);
15b1d171 5983 }
f0fec3f2 5984}
0e32b39c
DA
5985
5986void intel_dp_mst_suspend(struct drm_device *dev)
5987{
5988 struct drm_i915_private *dev_priv = dev->dev_private;
5989 int i;
5990
5991 /* disable MST */
5992 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5993 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
5994 if (!intel_dig_port)
5995 continue;
5996
5997 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5998 if (!intel_dig_port->dp.can_mst)
5999 continue;
6000 if (intel_dig_port->dp.is_mst)
6001 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6002 }
6003 }
6004}
6005
6006void intel_dp_mst_resume(struct drm_device *dev)
6007{
6008 struct drm_i915_private *dev_priv = dev->dev_private;
6009 int i;
6010
6011 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6012 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6013 if (!intel_dig_port)
6014 continue;
6015 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6016 int ret;
6017
6018 if (!intel_dig_port->dp.can_mst)
6019 continue;
6020
6021 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6022 if (ret != 0) {
6023 intel_dp_check_mst_status(&intel_dig_port->dp);
6024 }
6025 }
6026 }
6027}