/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only the fixed rates are provided; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
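/*
 * Editor's note, for illustration only (derived from the table above): in the
 * fixed-point encoding used for m2, the 162000 entry is
 * (32 << 22) | 1677722 == 0x819999a, matching the m2_int/m2_fraction
 * values noted in the per-entry comments.
 */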

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

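/*
 * Editor's note, illustrative numbers only (not taken from the spec text
 * above): a hypothetical 1920x1080@60 mode has a pixel clock of roughly
 * 148500 kHz, so at 24 bpp intel_dp_link_required() gives
 * (148500 * 24 + 9) / 10 == 356400 decakilobits, while two lanes of HBR2
 * give intel_dp_max_data_rate() == 540000 * 2 * 8 / 10 == 864000, i.e.
 * the mode fits with plenty of headroom.
 */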
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

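/*
 * Editor's note, illustrative example of the helpers above: packing the two
 * bytes { 0x12, 0x34 } with intel_dp_pack_aux() yields 0x12340000, i.e. the
 * first byte of the message lands in the most significant byte of the 32-bit
 * AUX data register; intel_dp_unpack_aux() performs the inverse.
 */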
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   Only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

676static uint32_t
677intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
678{
679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
680 struct drm_device *dev = intel_dig_port->base.base.dev;
681 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 682 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
683 uint32_t status;
684 bool done;
685
ef04f00d 686#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 687 if (has_aux_irq)
b18ac466 688 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 689 msecs_to_jiffies_timeout(10));
9ee32fea
DV
690 else
691 done = wait_for_atomic(C, 10) == 0;
692 if (!done)
693 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
694 has_aux_irq);
695#undef C
696
697 return status;
698}
699
ec5b01dd 700static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 701{
174edf1f
PZ
702 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
703 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 704
ec5b01dd
DL
705 /*
706 * The clock divider is based off the hrawclk, and would like to run at
707 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 708 */
ec5b01dd
DL
709 return index ? 0 : intel_hrawclk(dev) / 2;
710}
711
712static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
713{
714 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
715 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 716 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
717
718 if (index)
719 return 0;
720
721 if (intel_dig_port->port == PORT_A) {
05024da3
VS
722 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
723
ec5b01dd
DL
724 } else {
725 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
726 }
727}
728
729static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730{
731 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
732 struct drm_device *dev = intel_dig_port->base.base.dev;
733 struct drm_i915_private *dev_priv = dev->dev_private;
734
735 if (intel_dig_port->port == PORT_A) {
736 if (index)
737 return 0;
05024da3 738 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
2c55c336
JN
739 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
740 /* Workaround for non-ULT HSW */
bc86625a
CW
741 switch (index) {
742 case 0: return 63;
743 case 1: return 72;
744 default: return 0;
745 }
ec5b01dd 746 } else {
bc86625a 747 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 748 }
b84a1cf8
RV
749}
750
ec5b01dd
DL
751static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
752{
753 return index ? 0 : 100;
754}
755
b6b5e383
DL
756static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
757{
758 /*
759 * SKL doesn't need us to program the AUX clock divider (Hardware will
760 * derive the clock from CDCLK automatically). We still implement the
761 * get_aux_clock_divider vfunc to plug-in into the existing code.
762 */
763 return index ? 0 : 1;
764}
765
5ed12a19
DL
766static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
767 bool has_aux_irq,
768 int send_bytes,
769 uint32_t aux_clock_divider)
770{
771 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
772 struct drm_device *dev = intel_dig_port->base.base.dev;
773 uint32_t precharge, timeout;
774
775 if (IS_GEN6(dev))
776 precharge = 3;
777 else
778 precharge = 5;
779
780 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
781 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
782 else
783 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
784
785 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 786 DP_AUX_CH_CTL_DONE |
5ed12a19 787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 789 timeout |
788d4433 790 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 793 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
794}
795
b9ca5fad
DL
796static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
797 bool has_aux_irq,
798 int send_bytes,
799 uint32_t unused)
800{
801 return DP_AUX_CH_CTL_SEND_BUSY |
802 DP_AUX_CH_CTL_DONE |
803 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
804 DP_AUX_CH_CTL_TIME_OUT_ERROR |
805 DP_AUX_CH_CTL_TIME_OUT_1600us |
806 DP_AUX_CH_CTL_RECEIVE_ERROR |
807 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
808 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
809}
810
b84a1cf8
RV
811static int
812intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 813 const uint8_t *send, int send_bytes,
b84a1cf8
RV
814 uint8_t *recv, int recv_size)
815{
816 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
817 struct drm_device *dev = intel_dig_port->base.base.dev;
818 struct drm_i915_private *dev_priv = dev->dev_private;
819 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
820 uint32_t ch_data = ch_ctl + 4;
bc86625a 821 uint32_t aux_clock_divider;
b84a1cf8
RV
822 int i, ret, recv_bytes;
823 uint32_t status;
5ed12a19 824 int try, clock = 0;
4e6b788c 825 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
826 bool vdd;
827
773538e8 828 pps_lock(intel_dp);
e39b999a 829
72c3500a
VS
830 /*
831 * We will be called with VDD already enabled for dpcd/edid/oui reads.
832 * In such cases we want to leave VDD enabled and it's up to upper layers
833 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
834 * ourselves.
835 */
1e0560e0 836 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
837
838 /* dp aux is extremely sensitive to irq latency, hence request the
839 * lowest possible wakeup latency and so prevent the cpu from going into
840 * deep sleep states.
841 */
842 pm_qos_update_request(&dev_priv->pm_qos, 0);
843
844 intel_dp_check_edp(intel_dp);
5eb08b69 845
c67a470b
PZ
846 intel_aux_display_runtime_get(dev_priv);
847
11bee43e
JB
848 /* Try to wait for any previous AUX channel activity */
849 for (try = 0; try < 3; try++) {
ef04f00d 850 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
851 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
852 break;
853 msleep(1);
854 }
855
856 if (try == 3) {
02196c77
MK
857 static u32 last_status = -1;
858 const u32 status = I915_READ(ch_ctl);
859
860 if (status != last_status) {
861 WARN(1, "dp_aux_ch not started status 0x%08x\n",
862 status);
863 last_status = status;
864 }
865
9ee32fea
DV
866 ret = -EBUSY;
867 goto out;
4f7f7b7e
CW
868 }
869
46a5ae9f
PZ
870 /* Only 5 data registers! */
871 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
872 ret = -E2BIG;
873 goto out;
874 }
875
ec5b01dd 876 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
877 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
878 has_aux_irq,
879 send_bytes,
880 aux_clock_divider);
5ed12a19 881
bc86625a
CW
882 /* Must try at least 3 times according to DP spec */
883 for (try = 0; try < 5; try++) {
884 /* Load the send data into the aux channel data registers */
885 for (i = 0; i < send_bytes; i += 4)
886 I915_WRITE(ch_data + i,
a4f1289e
RV
887 intel_dp_pack_aux(send + i,
888 send_bytes - i));
bc86625a
CW
889
890 /* Send the command and wait for it to complete */
5ed12a19 891 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
892
893 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
894
895 /* Clear done status and any errors */
896 I915_WRITE(ch_ctl,
897 status |
898 DP_AUX_CH_CTL_DONE |
899 DP_AUX_CH_CTL_TIME_OUT_ERROR |
900 DP_AUX_CH_CTL_RECEIVE_ERROR);
901
74ebf294 902 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 903 continue;
74ebf294
TP
904
905 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
906 * 400us delay required for errors and timeouts
907 * Timeout errors from the HW already meet this
908 * requirement so skip to next iteration
909 */
910 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
911 usleep_range(400, 500);
bc86625a 912 continue;
74ebf294 913 }
bc86625a 914 if (status & DP_AUX_CH_CTL_DONE)
e058c945 915 goto done;
bc86625a 916 }
a4fc5ed6
KP
917 }
918
a4fc5ed6 919 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 920 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
921 ret = -EBUSY;
922 goto out;
a4fc5ed6
KP
923 }
924
e058c945 925done:
a4fc5ed6
KP
926 /* Check for timeout or receive error.
927 * Timeouts occur when the sink is not connected
928 */
a5b3da54 929 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 930 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
931 ret = -EIO;
932 goto out;
a5b3da54 933 }
1ae8c0a5
KP
934
935 /* Timeouts occur when the device isn't connected, so they're
936 * "normal" -- don't fill the kernel log with these */
a5b3da54 937 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 938 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
939 ret = -ETIMEDOUT;
940 goto out;
a4fc5ed6
KP
941 }
942
943 /* Unload any bytes sent back from the other side */
944 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
945 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
946 if (recv_bytes > recv_size)
947 recv_bytes = recv_size;
0206e353 948
4f7f7b7e 949 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
950 intel_dp_unpack_aux(I915_READ(ch_data + i),
951 recv + i, recv_bytes - i);
a4fc5ed6 952
9ee32fea
DV
953 ret = recv_bytes;
954out:
955 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 956 intel_aux_display_runtime_put(dev_priv);
9ee32fea 957
884f19e9
JN
958 if (vdd)
959 edp_panel_vdd_off(intel_dp, false);
960
773538e8 961 pps_unlock(intel_dp);
e39b999a 962
9ee32fea 963 return ret;
a4fc5ed6
KP
964}
965
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

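/*
 * Editor's note, worked example with illustrative values: intersecting the
 * default source rates { 162000, 270000, 540000 } with hypothetical sink
 * rates { 162000, 270000 } leaves common_rates = { 162000, 270000 } and
 * returns 2. Both input arrays must already be sorted in ascending order
 * for the merge-style walk in intersect_rates() to work.
 */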
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
				  uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}

7c62a164 1561static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1562{
7c62a164
DV
1563 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1564 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1565 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1566 struct drm_i915_private *dev_priv = dev->dev_private;
1567 u32 dpa_ctl;
1568
6e3c9717
ACO
1569 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1570 crtc->config->port_clock);
ea9b6006
DV
1571 dpa_ctl = I915_READ(DP_A);
1572 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1573
6e3c9717 1574 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1575 /* For a long time we've carried around a ILK-DevA w/a for the
1576 * 160MHz clock. If we're really unlucky, it's still required.
1577 */
1578 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1579 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1580 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1581 } else {
1582 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1583 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1584 }
1ce17038 1585
ea9b6006
DV
1586 I915_WRITE(DP_A, dpa_ctl);
1587
1588 POSTING_READ(DP_A);
1589 udelay(500);
1590}
1591
901c2daf
VS
1592void intel_dp_set_link_params(struct intel_dp *intel_dp,
1593 const struct intel_crtc_state *pipe_config)
1594{
1595 intel_dp->link_rate = pipe_config->port_clock;
1596 intel_dp->lane_count = pipe_config->lane_count;
1597}
1598
8ac33ed3 1599static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1600{
b934223d 1601 struct drm_device *dev = encoder->base.dev;
417e822d 1602 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1603 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1604 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1605 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1606 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1607
901c2daf
VS
1608 intel_dp_set_link_params(intel_dp, crtc->config);
1609
417e822d 1610 /*
1a2eb460 1611 * There are four kinds of DP registers:
417e822d
KP
1612 *
1613 * IBX PCH
1a2eb460
KP
1614 * SNB CPU
1615 * IVB CPU
417e822d
KP
1616 * CPT PCH
1617 *
1618 * IBX PCH and CPU are the same for almost everything,
1619 * except that the CPU DP PLL is configured in this
1620 * register
1621 *
1622 * CPT PCH is quite different, having many bits moved
1623 * to the TRANS_DP_CTL register instead. That
1624 * configuration happens (oddly) in ironlake_pch_enable
1625 */
9c9e7927 1626
417e822d
KP
1627 /* Preserve the BIOS-computed detected bit. This is
1628 * supposed to be read-only.
1629 */
1630 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1631
417e822d 1632 /* Handle DP bits in common between all three register formats */
417e822d 1633 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1634 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1635
6e3c9717 1636 if (crtc->config->has_audio)
ea5b213a 1637 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1638
417e822d 1639 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1640
39e5fa88 1641 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1642 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1643 intel_dp->DP |= DP_SYNC_HS_HIGH;
1644 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1645 intel_dp->DP |= DP_SYNC_VS_HIGH;
1646 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1647
6aba5b6c 1648 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1649 intel_dp->DP |= DP_ENHANCED_FRAMING;
1650
7c62a164 1651 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1652 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1653 u32 trans_dp;
1654
39e5fa88 1655 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1656
1657 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1658 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1659 trans_dp |= TRANS_DP_ENH_FRAMING;
1660 else
1661 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1662 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1663 } else {
0f2a2a75
VS
1664 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1665 crtc->config->limited_color_range)
1666 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1667
1668 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1669 intel_dp->DP |= DP_SYNC_HS_HIGH;
1670 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1671 intel_dp->DP |= DP_SYNC_VS_HIGH;
1672 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1673
6aba5b6c 1674 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1675 intel_dp->DP |= DP_ENHANCED_FRAMING;
1676
39e5fa88 1677 if (IS_CHERRYVIEW(dev))
44f37d1f 1678 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1679 else if (crtc->pipe == PIPE_B)
1680 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1681 }
a4fc5ed6
KP
1682}
1683
ffd6749d
PZ
1684#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1685#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1686
1a5ef5b7
PZ
1687#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1688#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1689
ffd6749d
PZ
1690#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1691#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1692
4be73780 1693static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1694 u32 mask,
1695 u32 value)
bd943159 1696{
30add22d 1697 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1698 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1699 u32 pp_stat_reg, pp_ctrl_reg;
1700
e39b999a
VS
1701 lockdep_assert_held(&dev_priv->pps_mutex);
1702
bf13e81b
JN
1703 pp_stat_reg = _pp_stat_reg(intel_dp);
1704 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1705
99ea7127 1706 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1707 mask, value,
1708 I915_READ(pp_stat_reg),
1709 I915_READ(pp_ctrl_reg));
32ce697c 1710
453c5420 1711 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1712 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1713 I915_READ(pp_stat_reg),
1714 I915_READ(pp_ctrl_reg));
32ce697c 1715 }
54c136d4
CW
1716
1717 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1718}
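wait_panel_status() above boils down to polling the panel power status register until the bits selected by a mask reach an expected value, with the IDLE_*_MASK/IDLE_*_VALUE pairs supplying those arguments. The standalone sketch below models only that mask/value comparison; the fake status word, the retry bound and the function name are made up for illustration, while the real code polls the hardware register through _wait_for() with a timeout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_pp_status;	/* stand-in for I915_READ(pp_stat_reg) */

/* poll until (status & mask) == value or we run out of tries */
static bool poll_status(uint32_t mask, uint32_t value, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if ((fake_pp_status & mask) == value)
			return true;
		/* the driver sleeps between polls; omitted in this model */
	}
	return false;
}

int main(void)
{
	fake_pp_status = 0x80000000;	/* pretend the "panel on" bit is set */

	printf("panel on:  %d\n", poll_status(0x80000000, 0x80000000, 3));
	printf("panel off: %d\n", poll_status(0x80000000, 0x00000000, 3));
	return 0;
}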
32ce697c 1719
4be73780 1720static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1721{
1722 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1723 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1724}
1725
4be73780 1726static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1727{
1728 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1729 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1730}
1731
4be73780 1732static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1733{
1734 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1735
1736	/* When we disable the VDD override bit last, we have to do the manual
1737 * wait. */
1738 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1739 intel_dp->panel_power_cycle_delay);
1740
4be73780 1741 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1742}
1743
4be73780 1744static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1745{
1746 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1747 intel_dp->backlight_on_delay);
1748}
1749
4be73780 1750static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1751{
1752 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1753 intel_dp->backlight_off_delay);
1754}
99ea7127 1755
832dd3c1
KP
1756/* Read the current pp_control value, unlocking the register if it
1757 * is locked
1758 */
1759
453c5420 1760static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1761{
453c5420
JB
1762 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1763 struct drm_i915_private *dev_priv = dev->dev_private;
1764 u32 control;
832dd3c1 1765
e39b999a
VS
1766 lockdep_assert_held(&dev_priv->pps_mutex);
1767
bf13e81b 1768 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1769 if (!IS_BROXTON(dev)) {
1770 control &= ~PANEL_UNLOCK_MASK;
1771 control |= PANEL_UNLOCK_REGS;
1772 }
832dd3c1 1773 return control;
bd943159
KP
1774}
1775
951468f3
VS
1776/*
1777 * Must be paired with edp_panel_vdd_off().
1778 * Must hold pps_mutex around the whole on/off sequence.
1779 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1780 */
1e0560e0 1781static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1782{
30add22d 1783 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1784 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1785 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1786 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1787 enum intel_display_power_domain power_domain;
5d613501 1788 u32 pp;
453c5420 1789 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1790 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1791
e39b999a
VS
1792 lockdep_assert_held(&dev_priv->pps_mutex);
1793
97af61f5 1794 if (!is_edp(intel_dp))
adddaaf4 1795 return false;
bd943159 1796
2c623c11 1797 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1798 intel_dp->want_panel_vdd = true;
99ea7127 1799
4be73780 1800 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1801 return need_to_disable;
b0665d57 1802
4e6e1a54
ID
1803 power_domain = intel_display_port_power_domain(intel_encoder);
1804 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1805
3936fcf4
VS
1806 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1807 port_name(intel_dig_port->port));
bd943159 1808
4be73780
DV
1809 if (!edp_have_panel_power(intel_dp))
1810 wait_panel_power_cycle(intel_dp);
99ea7127 1811
453c5420 1812 pp = ironlake_get_pp_control(intel_dp);
5d613501 1813 pp |= EDP_FORCE_VDD;
ebf33b18 1814
bf13e81b
JN
1815 pp_stat_reg = _pp_stat_reg(intel_dp);
1816 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1817
1818 I915_WRITE(pp_ctrl_reg, pp);
1819 POSTING_READ(pp_ctrl_reg);
1820 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1821 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1822 /*
1823 * If the panel wasn't on, delay before accessing aux channel
1824 */
4be73780 1825 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1826 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1827 port_name(intel_dig_port->port));
f01eca2e 1828 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1829 }
adddaaf4
JN
1830
1831 return need_to_disable;
1832}
1833
951468f3
VS
1834/*
1835 * Must be paired with intel_edp_panel_vdd_off() or
1836 * intel_edp_panel_off().
1837 * Nested calls to these functions are not allowed since
1838 * we drop the lock. Caller must use some higher level
1839 * locking to prevent nested calls from other threads.
1840 */
b80d6c78 1841void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1842{
c695b6b6 1843 bool vdd;
adddaaf4 1844
c695b6b6
VS
1845 if (!is_edp(intel_dp))
1846 return;
1847
773538e8 1848 pps_lock(intel_dp);
c695b6b6 1849 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1850 pps_unlock(intel_dp);
c695b6b6 1851
e2c719b7 1852 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1853 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1854}
1855
4be73780 1856static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1857{
30add22d 1858 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1859 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1860 struct intel_digital_port *intel_dig_port =
1861 dp_to_dig_port(intel_dp);
1862 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1863 enum intel_display_power_domain power_domain;
5d613501 1864 u32 pp;
453c5420 1865 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1866
e39b999a 1867 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1868
15e899a0 1869 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1870
15e899a0 1871 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1872 return;
b0665d57 1873
3936fcf4
VS
1874 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1875 port_name(intel_dig_port->port));
bd943159 1876
be2c9196
VS
1877 pp = ironlake_get_pp_control(intel_dp);
1878 pp &= ~EDP_FORCE_VDD;
453c5420 1879
be2c9196
VS
1880 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1881 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1882
be2c9196
VS
1883 I915_WRITE(pp_ctrl_reg, pp);
1884 POSTING_READ(pp_ctrl_reg);
90791a5c 1885
be2c9196
VS
1886 /* Make sure sequencer is idle before allowing subsequent activity */
1887 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1888 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1889
be2c9196
VS
1890 if ((pp & POWER_TARGET_ON) == 0)
1891 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1892
be2c9196
VS
1893 power_domain = intel_display_port_power_domain(intel_encoder);
1894 intel_display_power_put(dev_priv, power_domain);
bd943159 1895}
5d613501 1896
4be73780 1897static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1898{
1899 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1900 struct intel_dp, panel_vdd_work);
bd943159 1901
773538e8 1902 pps_lock(intel_dp);
15e899a0
VS
1903 if (!intel_dp->want_panel_vdd)
1904 edp_panel_vdd_off_sync(intel_dp);
773538e8 1905 pps_unlock(intel_dp);
bd943159
KP
1906}
1907
aba86890
ID
1908static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1909{
1910 unsigned long delay;
1911
1912 /*
1913 * Queue the timer to fire a long time from now (relative to the power
1914 * down delay) to keep the panel power up across a sequence of
1915 * operations.
1916 */
1917 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1918 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1919}
1920
951468f3
VS
1921/*
1922 * Must be paired with edp_panel_vdd_on().
1923 * Must hold pps_mutex around the whole on/off sequence.
1924 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1925 */
4be73780 1926static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1927{
e39b999a
VS
1928 struct drm_i915_private *dev_priv =
1929 intel_dp_to_dev(intel_dp)->dev_private;
1930
1931 lockdep_assert_held(&dev_priv->pps_mutex);
1932
97af61f5
KP
1933 if (!is_edp(intel_dp))
1934 return;
5d613501 1935
e2c719b7 1936 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1937 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1938
bd943159
KP
1939 intel_dp->want_panel_vdd = false;
1940
aba86890 1941 if (sync)
4be73780 1942 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1943 else
1944 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1945}
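The pairing rules spelled out in the comments above (VDD on/off must be paired and kept inside a single pps_mutex critical section) translate into the call pattern that intel_enable_dp() uses later in this listing. The sketch below just restates that pattern; it is not a standalone program since it relies on the driver-internal helpers already shown, and the wrapper name is invented.

/* illustrative only: pattern borrowed from intel_enable_dp() further down */
static void example_poke_panel(struct intel_dp *intel_dp)
{
	pps_lock(intel_dp);

	edp_panel_vdd_on(intel_dp);		/* take a VDD reference */
	edp_panel_on(intel_dp);			/* ...work that needs panel power... */
	edp_panel_vdd_off(intel_dp, true);	/* drop the reference synchronously */

	pps_unlock(intel_dp);
}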
1946
9f0fb5be 1947static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1948{
30add22d 1949 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1950 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1951 u32 pp;
453c5420 1952 u32 pp_ctrl_reg;
9934c132 1953
9f0fb5be
VS
1954 lockdep_assert_held(&dev_priv->pps_mutex);
1955
97af61f5 1956 if (!is_edp(intel_dp))
bd943159 1957 return;
99ea7127 1958
3936fcf4
VS
1959 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1960 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1961
e7a89ace
VS
1962 if (WARN(edp_have_panel_power(intel_dp),
1963 "eDP port %c panel power already on\n",
1964 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1965 return;
9934c132 1966
4be73780 1967 wait_panel_power_cycle(intel_dp);
37c6c9b0 1968
bf13e81b 1969 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1970 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1971 if (IS_GEN5(dev)) {
1972 /* ILK workaround: disable reset around power sequence */
1973 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1974 I915_WRITE(pp_ctrl_reg, pp);
1975 POSTING_READ(pp_ctrl_reg);
05ce1a49 1976 }
37c6c9b0 1977
1c0ae80a 1978 pp |= POWER_TARGET_ON;
99ea7127
KP
1979 if (!IS_GEN5(dev))
1980 pp |= PANEL_POWER_RESET;
1981
453c5420
JB
1982 I915_WRITE(pp_ctrl_reg, pp);
1983 POSTING_READ(pp_ctrl_reg);
9934c132 1984
4be73780 1985 wait_panel_on(intel_dp);
dce56b3c 1986 intel_dp->last_power_on = jiffies;
9934c132 1987
05ce1a49
KP
1988 if (IS_GEN5(dev)) {
1989 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1990 I915_WRITE(pp_ctrl_reg, pp);
1991 POSTING_READ(pp_ctrl_reg);
05ce1a49 1992 }
9f0fb5be 1993}
e39b999a 1994
9f0fb5be
VS
1995void intel_edp_panel_on(struct intel_dp *intel_dp)
1996{
1997 if (!is_edp(intel_dp))
1998 return;
1999
2000 pps_lock(intel_dp);
2001 edp_panel_on(intel_dp);
773538e8 2002 pps_unlock(intel_dp);
9934c132
JB
2003}
2004
9f0fb5be
VS
2005
2006static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2007{
4e6e1a54
ID
2008 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2009 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2010 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2011 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2012 enum intel_display_power_domain power_domain;
99ea7127 2013 u32 pp;
453c5420 2014 u32 pp_ctrl_reg;
9934c132 2015
9f0fb5be
VS
2016 lockdep_assert_held(&dev_priv->pps_mutex);
2017
97af61f5
KP
2018 if (!is_edp(intel_dp))
2019 return;
37c6c9b0 2020
3936fcf4
VS
2021 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2022 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2023
3936fcf4
VS
2024 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2025 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2026
453c5420 2027 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2030	/* We need to switch off panel power _and_ force vdd; otherwise some
2029 * panels get very unhappy and cease to work. */
b3064154
PJ
2030 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2031 EDP_BLC_ENABLE);
453c5420 2032
bf13e81b 2033 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2034
849e39f5
PZ
2035 intel_dp->want_panel_vdd = false;
2036
453c5420
JB
2037 I915_WRITE(pp_ctrl_reg, pp);
2038 POSTING_READ(pp_ctrl_reg);
9934c132 2039
dce56b3c 2040 intel_dp->last_power_cycle = jiffies;
4be73780 2041 wait_panel_off(intel_dp);
849e39f5
PZ
2042
2043 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2044 power_domain = intel_display_port_power_domain(intel_encoder);
2045 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2046}
e39b999a 2047
9f0fb5be
VS
2048void intel_edp_panel_off(struct intel_dp *intel_dp)
2049{
2050 if (!is_edp(intel_dp))
2051 return;
e39b999a 2052
9f0fb5be
VS
2053 pps_lock(intel_dp);
2054 edp_panel_off(intel_dp);
773538e8 2055 pps_unlock(intel_dp);
9934c132
JB
2056}
2057
1250d107
JN
2058/* Enable backlight in the panel power control. */
2059static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2060{
da63a9f2
PZ
2061 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2062 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2063 struct drm_i915_private *dev_priv = dev->dev_private;
2064 u32 pp;
453c5420 2065 u32 pp_ctrl_reg;
32f9d658 2066
01cb9ea6
JB
2067 /*
2068 * If we enable the backlight right away following a panel power
2069 * on, we may see slight flicker as the panel syncs with the eDP
2070 * link. So delay a bit to make sure the image is solid before
2071 * allowing it to appear.
2072 */
4be73780 2073 wait_backlight_on(intel_dp);
e39b999a 2074
773538e8 2075 pps_lock(intel_dp);
e39b999a 2076
453c5420 2077 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2078 pp |= EDP_BLC_ENABLE;
453c5420 2079
bf13e81b 2080 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2081
2082 I915_WRITE(pp_ctrl_reg, pp);
2083 POSTING_READ(pp_ctrl_reg);
e39b999a 2084
773538e8 2085 pps_unlock(intel_dp);
32f9d658
ZW
2086}
2087
1250d107
JN
2088/* Enable backlight PWM and backlight PP control. */
2089void intel_edp_backlight_on(struct intel_dp *intel_dp)
2090{
2091 if (!is_edp(intel_dp))
2092 return;
2093
2094 DRM_DEBUG_KMS("\n");
2095
2096 intel_panel_enable_backlight(intel_dp->attached_connector);
2097 _intel_edp_backlight_on(intel_dp);
2098}
2099
2100/* Disable backlight in the panel power control. */
2101static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2102{
30add22d 2103 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 u32 pp;
453c5420 2106 u32 pp_ctrl_reg;
32f9d658 2107
f01eca2e
KP
2108 if (!is_edp(intel_dp))
2109 return;
2110
773538e8 2111 pps_lock(intel_dp);
e39b999a 2112
453c5420 2113 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2114 pp &= ~EDP_BLC_ENABLE;
453c5420 2115
bf13e81b 2116 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2117
2118 I915_WRITE(pp_ctrl_reg, pp);
2119 POSTING_READ(pp_ctrl_reg);
f7d2323c 2120
773538e8 2121 pps_unlock(intel_dp);
e39b999a
VS
2122
2123 intel_dp->last_backlight_off = jiffies;
f7d2323c 2124 edp_wait_backlight_off(intel_dp);
1250d107 2125}
f7d2323c 2126
1250d107
JN
2127/* Disable backlight PP control and backlight PWM. */
2128void intel_edp_backlight_off(struct intel_dp *intel_dp)
2129{
2130 if (!is_edp(intel_dp))
2131 return;
2132
2133 DRM_DEBUG_KMS("\n");
f7d2323c 2134
1250d107 2135 _intel_edp_backlight_off(intel_dp);
f7d2323c 2136 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2137}
a4fc5ed6 2138
73580fb7
JN
2139/*
2140 * Hook for controlling the panel power control backlight through the bl_power
2141 * sysfs attribute. Take care to handle multiple calls.
2142 */
2143static void intel_edp_backlight_power(struct intel_connector *connector,
2144 bool enable)
2145{
2146 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2147 bool is_enabled;
2148
773538e8 2149 pps_lock(intel_dp);
e39b999a 2150 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2151 pps_unlock(intel_dp);
73580fb7
JN
2152
2153 if (is_enabled == enable)
2154 return;
2155
23ba9373
JN
2156 DRM_DEBUG_KMS("panel power control backlight %s\n",
2157 enable ? "enable" : "disable");
73580fb7
JN
2158
2159 if (enable)
2160 _intel_edp_backlight_on(intel_dp);
2161 else
2162 _intel_edp_backlight_off(intel_dp);
2163}
2164
2bd2ad64 2165static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2166{
da63a9f2
PZ
2167 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2168 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2169 struct drm_device *dev = crtc->dev;
d240f20f
JB
2170 struct drm_i915_private *dev_priv = dev->dev_private;
2171 u32 dpa_ctl;
2172
2bd2ad64
DV
2173 assert_pipe_disabled(dev_priv,
2174 to_intel_crtc(crtc)->pipe);
2175
d240f20f
JB
2176 DRM_DEBUG_KMS("\n");
2177 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2178 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2179 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2180
2181 /* We don't adjust intel_dp->DP while tearing down the link, to
2182 * facilitate link retraining (e.g. after hotplug). Hence clear all
2183 * enable bits here to ensure that we don't enable too much. */
2184 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2185 intel_dp->DP |= DP_PLL_ENABLE;
2186 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2187 POSTING_READ(DP_A);
2188 udelay(200);
d240f20f
JB
2189}
2190
2bd2ad64 2191static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2192{
da63a9f2
PZ
2193 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2194 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2195 struct drm_device *dev = crtc->dev;
d240f20f
JB
2196 struct drm_i915_private *dev_priv = dev->dev_private;
2197 u32 dpa_ctl;
2198
2bd2ad64
DV
2199 assert_pipe_disabled(dev_priv,
2200 to_intel_crtc(crtc)->pipe);
2201
d240f20f 2202 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2203 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2204 "dp pll off, should be on\n");
2205 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2206
2207 /* We can't rely on the value tracked for the DP register in
2208 * intel_dp->DP because link_down must not change that (otherwise link
2209	 * re-training will fail). */
298b0b39 2210 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2211 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2212 POSTING_READ(DP_A);
d240f20f
JB
2213 udelay(200);
2214}
2215
c7ad3810 2216/* If the sink supports it, try to set the power state appropriately */
c19b0669 2217void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2218{
2219 int ret, i;
2220
2221 /* Should have a valid DPCD by this point */
2222 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2223 return;
2224
2225 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2226 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2227 DP_SET_POWER_D3);
c7ad3810
JB
2228 } else {
2229 /*
2230 * When turning on, we need to retry for 1ms to give the sink
2231 * time to wake up.
2232 */
2233 for (i = 0; i < 3; i++) {
9d1a1031
JN
2234 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2235 DP_SET_POWER_D0);
c7ad3810
JB
2236 if (ret == 1)
2237 break;
2238 msleep(1);
2239 }
2240 }
f9cac721
JN
2241
2242 if (ret != 1)
2243 DRM_DEBUG_KMS("failed to %s sink power state\n",
2244 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2245}
2246
19d8fe15
DV
2247static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2248 enum pipe *pipe)
d240f20f 2249{
19d8fe15 2250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2251 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2252 struct drm_device *dev = encoder->base.dev;
2253 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2254 enum intel_display_power_domain power_domain;
2255 u32 tmp;
2256
2257 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2258 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2259 return false;
2260
2261 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2262
2263 if (!(tmp & DP_PORT_EN))
2264 return false;
2265
39e5fa88 2266 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2267 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2268 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2269 enum pipe p;
19d8fe15 2270
adc289d7
VS
2271 for_each_pipe(dev_priv, p) {
2272 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2273 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2274 *pipe = p;
19d8fe15
DV
2275 return true;
2276 }
2277 }
19d8fe15 2278
4a0833ec
DV
2279 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2280 intel_dp->output_reg);
39e5fa88
VS
2281 } else if (IS_CHERRYVIEW(dev)) {
2282 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2283 } else {
2284 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2285 }
d240f20f 2286
19d8fe15
DV
2287 return true;
2288}
d240f20f 2289
045ac3b5 2290static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2291 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2292{
2293 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2294 u32 tmp, flags = 0;
63000ef6
XZ
2295 struct drm_device *dev = encoder->base.dev;
2296 struct drm_i915_private *dev_priv = dev->dev_private;
2297 enum port port = dp_to_dig_port(intel_dp)->port;
2298 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2299 int dotclock;
045ac3b5 2300
9ed109a7 2301 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2302
2303 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2304
39e5fa88 2305 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2306 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2307
2308 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2309 flags |= DRM_MODE_FLAG_PHSYNC;
2310 else
2311 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2312
b81e34c2 2313 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2314 flags |= DRM_MODE_FLAG_PVSYNC;
2315 else
2316 flags |= DRM_MODE_FLAG_NVSYNC;
2317 } else {
39e5fa88 2318 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2319 flags |= DRM_MODE_FLAG_PHSYNC;
2320 else
2321 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2322
39e5fa88 2323 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2324 flags |= DRM_MODE_FLAG_PVSYNC;
2325 else
2326 flags |= DRM_MODE_FLAG_NVSYNC;
2327 }
045ac3b5 2328
2d112de7 2329 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2330
8c875fca
VS
2331 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2332 tmp & DP_COLOR_RANGE_16_235)
2333 pipe_config->limited_color_range = true;
2334
eb14cb74
VS
2335 pipe_config->has_dp_encoder = true;
2336
90a6b7b0
VS
2337 pipe_config->lane_count =
2338 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2339
eb14cb74
VS
2340 intel_dp_get_m_n(crtc, pipe_config);
2341
18442d08 2342 if (port == PORT_A) {
f1f644dc
JB
2343 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2344 pipe_config->port_clock = 162000;
2345 else
2346 pipe_config->port_clock = 270000;
2347 }
18442d08
VS
2348
2349 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2350 &pipe_config->dp_m_n);
2351
2352 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2353 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2354
2d112de7 2355 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2356
c6cd2ee2
JN
2357 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2358 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2359 /*
2360 * This is a big fat ugly hack.
2361 *
2362 * Some machines in UEFI boot mode provide us a VBT that has 18
2363 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2364 * unknown we fail to light up. Yet the same BIOS boots up with
2365 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2366 * max, not what it tells us to use.
2367 *
2368 * Note: This will still be broken if the eDP panel is not lit
2369 * up by the BIOS, and thus we can't get the mode at module
2370 * load.
2371 */
2372 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2373 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2374 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2375 }
045ac3b5
JB
2376}
2377
e8cb4558 2378static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2379{
e8cb4558 2380 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2381 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2382 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2383
6e3c9717 2384 if (crtc->config->has_audio)
495a5bb8 2385 intel_audio_codec_disable(encoder);
6cb49835 2386
b32c6f48
RV
2387 if (HAS_PSR(dev) && !HAS_DDI(dev))
2388 intel_psr_disable(intel_dp);
2389
6cb49835
DV
2390 /* Make sure the panel is off before trying to change the mode. But also
2391 * ensure that we have vdd while we switch off the panel. */
24f3e092 2392 intel_edp_panel_vdd_on(intel_dp);
4be73780 2393 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2394 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2395 intel_edp_panel_off(intel_dp);
3739850b 2396
08aff3fe
VS
2397 /* disable the port before the pipe on g4x */
2398 if (INTEL_INFO(dev)->gen < 5)
3739850b 2399 intel_dp_link_down(intel_dp);
d240f20f
JB
2400}
2401
08aff3fe 2402static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2403{
2bd2ad64 2404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2405 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2406
49277c31 2407 intel_dp_link_down(intel_dp);
08aff3fe
VS
2408 if (port == PORT_A)
2409 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2410}
2411
2412static void vlv_post_disable_dp(struct intel_encoder *encoder)
2413{
2414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2415
2416 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2417}
2418
580d3811
VS
2419static void chv_post_disable_dp(struct intel_encoder *encoder)
2420{
2421 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2422 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2423 struct drm_device *dev = encoder->base.dev;
2424 struct drm_i915_private *dev_priv = dev->dev_private;
2425 struct intel_crtc *intel_crtc =
2426 to_intel_crtc(encoder->base.crtc);
2427 enum dpio_channel ch = vlv_dport_to_channel(dport);
2428 enum pipe pipe = intel_crtc->pipe;
2429 u32 val;
2430
2431 intel_dp_link_down(intel_dp);
2432
a580516d 2433 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2434
2435 /* Propagate soft reset to data lane reset */
97fd4d5c 2436 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2437 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2438 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2439
e0fce78f
VS
2440 if (intel_crtc->config->lane_count > 2) {
2441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2442 val |= CHV_PCS_REQ_SOFTRESET_EN;
2443 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2444 }
97fd4d5c
VS
2445
2446 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2447 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2448 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2449
e0fce78f
VS
2450 if (intel_crtc->config->lane_count > 2) {
2451 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2452 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2453 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2454 }
580d3811 2455
a580516d 2456 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2457}
2458
7b13b58a
VS
2459static void
2460_intel_dp_set_link_train(struct intel_dp *intel_dp,
2461 uint32_t *DP,
2462 uint8_t dp_train_pat)
2463{
2464 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2465 struct drm_device *dev = intel_dig_port->base.base.dev;
2466 struct drm_i915_private *dev_priv = dev->dev_private;
2467 enum port port = intel_dig_port->port;
2468
2469 if (HAS_DDI(dev)) {
2470 uint32_t temp = I915_READ(DP_TP_CTL(port));
2471
2472 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2473 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2474 else
2475 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2476
2477 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2478 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2479 case DP_TRAINING_PATTERN_DISABLE:
2480 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2481
2482 break;
2483 case DP_TRAINING_PATTERN_1:
2484 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2485 break;
2486 case DP_TRAINING_PATTERN_2:
2487 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2488 break;
2489 case DP_TRAINING_PATTERN_3:
2490 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2491 break;
2492 }
2493 I915_WRITE(DP_TP_CTL(port), temp);
2494
39e5fa88
VS
2495 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2496 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2497 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2498
2499 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2500 case DP_TRAINING_PATTERN_DISABLE:
2501 *DP |= DP_LINK_TRAIN_OFF_CPT;
2502 break;
2503 case DP_TRAINING_PATTERN_1:
2504 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2505 break;
2506 case DP_TRAINING_PATTERN_2:
2507 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2508 break;
2509 case DP_TRAINING_PATTERN_3:
2510 DRM_ERROR("DP training pattern 3 not supported\n");
2511 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2512 break;
2513 }
2514
2515 } else {
2516 if (IS_CHERRYVIEW(dev))
2517 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2518 else
2519 *DP &= ~DP_LINK_TRAIN_MASK;
2520
2521 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2522 case DP_TRAINING_PATTERN_DISABLE:
2523 *DP |= DP_LINK_TRAIN_OFF;
2524 break;
2525 case DP_TRAINING_PATTERN_1:
2526 *DP |= DP_LINK_TRAIN_PAT_1;
2527 break;
2528 case DP_TRAINING_PATTERN_2:
2529 *DP |= DP_LINK_TRAIN_PAT_2;
2530 break;
2531 case DP_TRAINING_PATTERN_3:
2532 if (IS_CHERRYVIEW(dev)) {
2533 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2534 } else {
2535 DRM_ERROR("DP training pattern 3 not supported\n");
2536 *DP |= DP_LINK_TRAIN_PAT_2;
2537 }
2538 break;
2539 }
2540 }
2541}
2542
2543static void intel_dp_enable_port(struct intel_dp *intel_dp)
2544{
2545 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2546 struct drm_i915_private *dev_priv = dev->dev_private;
2547
7b13b58a
VS
2548 /* enable with pattern 1 (as per spec) */
2549 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2550 DP_TRAINING_PATTERN_1);
2551
2552 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2553 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2554
2555 /*
2556 * Magic for VLV/CHV. We _must_ first set up the register
2557 * without actually enabling the port, and then do another
2558 * write to enable the port. Otherwise link training will
2559 * fail when the power sequencer is freshly used for this port.
2560 */
2561 intel_dp->DP |= DP_PORT_EN;
2562
2563 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2564 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2565}
2566
e8cb4558 2567static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2568{
e8cb4558
DV
2569 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2570 struct drm_device *dev = encoder->base.dev;
2571 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2572 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2573 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2574
0c33d8d7
DV
2575 if (WARN_ON(dp_reg & DP_PORT_EN))
2576 return;
5d613501 2577
093e3f13
VS
2578 pps_lock(intel_dp);
2579
2580 if (IS_VALLEYVIEW(dev))
2581 vlv_init_panel_power_sequencer(intel_dp);
2582
7b13b58a 2583 intel_dp_enable_port(intel_dp);
093e3f13
VS
2584
2585 edp_panel_vdd_on(intel_dp);
2586 edp_panel_on(intel_dp);
2587 edp_panel_vdd_off(intel_dp, true);
2588
2589 pps_unlock(intel_dp);
2590
e0fce78f
VS
2591 if (IS_VALLEYVIEW(dev)) {
2592 unsigned int lane_mask = 0x0;
2593
2594 if (IS_CHERRYVIEW(dev))
2595 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2596
9b6de0a1
VS
2597 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2598 lane_mask);
e0fce78f 2599 }
61234fa5 2600
f01eca2e 2601 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2602 intel_dp_start_link_train(intel_dp);
33a34e4e 2603 intel_dp_complete_link_train(intel_dp);
3ab9c637 2604 intel_dp_stop_link_train(intel_dp);
c1dec79a 2605
6e3c9717 2606 if (crtc->config->has_audio) {
c1dec79a
JN
2607 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2608 pipe_name(crtc->pipe));
2609 intel_audio_codec_enable(encoder);
2610 }
ab1f90f9 2611}
89b667f8 2612
ecff4f3b
JN
2613static void g4x_enable_dp(struct intel_encoder *encoder)
2614{
828f5c6e
JN
2615 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2616
ecff4f3b 2617 intel_enable_dp(encoder);
4be73780 2618 intel_edp_backlight_on(intel_dp);
ab1f90f9 2619}
89b667f8 2620
ab1f90f9
JN
2621static void vlv_enable_dp(struct intel_encoder *encoder)
2622{
828f5c6e
JN
2623 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2624
4be73780 2625 intel_edp_backlight_on(intel_dp);
b32c6f48 2626 intel_psr_enable(intel_dp);
d240f20f
JB
2627}
2628
ecff4f3b 2629static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2630{
2631 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2632 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2633
8ac33ed3
DV
2634 intel_dp_prepare(encoder);
2635
d41f1efb
DV
2636 /* Only ilk+ has port A */
2637 if (dport->port == PORT_A) {
2638 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2639 ironlake_edp_pll_on(intel_dp);
d41f1efb 2640 }
ab1f90f9
JN
2641}
2642
83b84597
VS
2643static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2644{
2645 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2646 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2647 enum pipe pipe = intel_dp->pps_pipe;
2648 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2649
2650 edp_panel_vdd_off_sync(intel_dp);
2651
2652 /*
2653	 * VLV seems to get confused when multiple power sequencers
2654	 * have the same port selected (even if only one has power/vdd
2655	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2656	 * CHV on the other hand doesn't seem to mind having the same port
2657	 * selected in multiple power sequencers, but let's always clear the
2658	 * port select when logically disconnecting a power sequencer
2659 * from a port.
2660 */
2661 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2662 pipe_name(pipe), port_name(intel_dig_port->port));
2663 I915_WRITE(pp_on_reg, 0);
2664 POSTING_READ(pp_on_reg);
2665
2666 intel_dp->pps_pipe = INVALID_PIPE;
2667}
2668
a4a5d2f8
VS
2669static void vlv_steal_power_sequencer(struct drm_device *dev,
2670 enum pipe pipe)
2671{
2672 struct drm_i915_private *dev_priv = dev->dev_private;
2673 struct intel_encoder *encoder;
2674
2675 lockdep_assert_held(&dev_priv->pps_mutex);
2676
ac3c12e4
VS
2677 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2678 return;
2679
a4a5d2f8
VS
2680 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2681 base.head) {
2682 struct intel_dp *intel_dp;
773538e8 2683 enum port port;
a4a5d2f8
VS
2684
2685 if (encoder->type != INTEL_OUTPUT_EDP)
2686 continue;
2687
2688 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2689 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2690
2691 if (intel_dp->pps_pipe != pipe)
2692 continue;
2693
2694 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2695 pipe_name(pipe), port_name(port));
a4a5d2f8 2696
e02f9a06 2697 WARN(encoder->base.crtc,
034e43c6
VS
2698 "stealing pipe %c power sequencer from active eDP port %c\n",
2699 pipe_name(pipe), port_name(port));
a4a5d2f8 2700
a4a5d2f8 2701 /* make sure vdd is off before we steal it */
83b84597 2702 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2703 }
2704}
2705
2706static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2707{
2708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2709 struct intel_encoder *encoder = &intel_dig_port->base;
2710 struct drm_device *dev = encoder->base.dev;
2711 struct drm_i915_private *dev_priv = dev->dev_private;
2712 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2713
2714 lockdep_assert_held(&dev_priv->pps_mutex);
2715
093e3f13
VS
2716 if (!is_edp(intel_dp))
2717 return;
2718
a4a5d2f8
VS
2719 if (intel_dp->pps_pipe == crtc->pipe)
2720 return;
2721
2722 /*
2723 * If another power sequencer was being used on this
2724 * port previously make sure to turn off vdd there while
2725 * we still have control of it.
2726 */
2727 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2728 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2729
2730 /*
2731 * We may be stealing the power
2732 * sequencer from another port.
2733 */
2734 vlv_steal_power_sequencer(dev, crtc->pipe);
2735
2736 /* now it's all ours */
2737 intel_dp->pps_pipe = crtc->pipe;
2738
2739 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2740 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2741
2742 /* init power sequencer on this pipe and port */
36b5f425
VS
2743 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2744 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2745}
2746
ab1f90f9 2747static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2748{
2bd2ad64 2749 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2750 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2751 struct drm_device *dev = encoder->base.dev;
89b667f8 2752 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2753 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2754 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2755 int pipe = intel_crtc->pipe;
2756 u32 val;
a4fc5ed6 2757
a580516d 2758 mutex_lock(&dev_priv->sb_lock);
89b667f8 2759
ab3c759a 2760 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2761 val = 0;
2762 if (pipe)
2763 val |= (1<<21);
2764 else
2765 val &= ~(1<<21);
2766 val |= 0x001000c4;
ab3c759a
CML
2767 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2768 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2769 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2770
a580516d 2771 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2772
2773 intel_enable_dp(encoder);
89b667f8
JB
2774}
2775
ecff4f3b 2776static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2777{
2778 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2779 struct drm_device *dev = encoder->base.dev;
2780 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2781 struct intel_crtc *intel_crtc =
2782 to_intel_crtc(encoder->base.crtc);
e4607fcf 2783 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2784 int pipe = intel_crtc->pipe;
89b667f8 2785
8ac33ed3
DV
2786 intel_dp_prepare(encoder);
2787
89b667f8 2788 /* Program Tx lane resets to default */
a580516d 2789 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2790 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2791 DPIO_PCS_TX_LANE2_RESET |
2792 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2793 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2794 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2795 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2796 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2797 DPIO_PCS_CLK_SOFT_RESET);
2798
2799 /* Fix up inter-pair skew failure */
ab3c759a
CML
2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2801 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2802 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2803 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2804}
2805
e4a1d846
CML
2806static void chv_pre_enable_dp(struct intel_encoder *encoder)
2807{
2808 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2809 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2810 struct drm_device *dev = encoder->base.dev;
2811 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2812 struct intel_crtc *intel_crtc =
2813 to_intel_crtc(encoder->base.crtc);
2814 enum dpio_channel ch = vlv_dport_to_channel(dport);
2815 int pipe = intel_crtc->pipe;
2e523e98 2816 int data, i, stagger;
949c1d43 2817 u32 val;
e4a1d846 2818
a580516d 2819 mutex_lock(&dev_priv->sb_lock);
949c1d43 2820
570e2a74
VS
2821 /* allow hardware to manage TX FIFO reset source */
2822 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2823 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2824 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2825
e0fce78f
VS
2826 if (intel_crtc->config->lane_count > 2) {
2827 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2828 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2829 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2830 }
570e2a74 2831
949c1d43 2832 /* Deassert soft data lane reset*/
97fd4d5c 2833 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2834 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2835 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2836
e0fce78f
VS
2837 if (intel_crtc->config->lane_count > 2) {
2838 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2839 val |= CHV_PCS_REQ_SOFTRESET_EN;
2840 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2841 }
97fd4d5c
VS
2842
2843 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2844 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2845 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2846
e0fce78f
VS
2847 if (intel_crtc->config->lane_count > 2) {
2848 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2849 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2851 }
949c1d43
VS
2852
2853 /* Program Tx lane latency optimal setting*/
e0fce78f 2854 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 2855 /* Set the upar bit */
e0fce78f
VS
2856 if (intel_crtc->config->lane_count == 1)
2857 data = 0x0;
2858 else
2859 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
2860 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2861 data << DPIO_UPAR_SHIFT);
2862 }
2863
2864 /* Data lane stagger programming */
2e523e98
VS
2865 if (intel_crtc->config->port_clock > 270000)
2866 stagger = 0x18;
2867 else if (intel_crtc->config->port_clock > 135000)
2868 stagger = 0xd;
2869 else if (intel_crtc->config->port_clock > 67500)
2870 stagger = 0x7;
2871 else if (intel_crtc->config->port_clock > 33750)
2872 stagger = 0x4;
2873 else
2874 stagger = 0x2;
2875
2876 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2877 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2878 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2879
e0fce78f
VS
2880 if (intel_crtc->config->lane_count > 2) {
2881 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2882 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2883 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2884 }
2e523e98
VS
2885
2886 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2887 DPIO_LANESTAGGER_STRAP(stagger) |
2888 DPIO_LANESTAGGER_STRAP_OVRD |
2889 DPIO_TX1_STAGGER_MASK(0x1f) |
2890 DPIO_TX1_STAGGER_MULT(6) |
2891 DPIO_TX2_STAGGER_MULT(0));
2892
e0fce78f
VS
2893 if (intel_crtc->config->lane_count > 2) {
2894 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2895 DPIO_LANESTAGGER_STRAP(stagger) |
2896 DPIO_LANESTAGGER_STRAP_OVRD |
2897 DPIO_TX1_STAGGER_MASK(0x1f) |
2898 DPIO_TX1_STAGGER_MULT(7) |
2899 DPIO_TX2_STAGGER_MULT(5));
2900 }
e4a1d846 2901
a580516d 2902 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2903
e4a1d846 2904 intel_enable_dp(encoder);
b0b33846
VS
2905
2906 /* Second common lane will stay alive on its own now */
2907 if (dport->release_cl2_override) {
2908 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2909 dport->release_cl2_override = false;
2910 }
e4a1d846
CML
2911}
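The data lane stagger programming in chv_pre_enable_dp() above selects a larger stagger value for higher port clocks before writing it into the PCS registers. A standalone model of just that selection; the function name is invented, the thresholds and values are copied from the if/else chain above.

#include <stdio.h>

static int chv_stagger_for_clock(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}

int main(void)
{
	printf("stagger @ 162000: 0x%x\n", chv_stagger_for_clock(162000));
	printf("stagger @ 270000: 0x%x\n", chv_stagger_for_clock(270000));
	printf("stagger @ 540000: 0x%x\n", chv_stagger_for_clock(540000));
	return 0;
}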
2912
9197c88b
VS
2913static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2914{
2915 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2916 struct drm_device *dev = encoder->base.dev;
2917 struct drm_i915_private *dev_priv = dev->dev_private;
2918 struct intel_crtc *intel_crtc =
2919 to_intel_crtc(encoder->base.crtc);
2920 enum dpio_channel ch = vlv_dport_to_channel(dport);
2921 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
2922 unsigned int lane_mask =
2923 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
2924 u32 val;
2925
625695f8
VS
2926 intel_dp_prepare(encoder);
2927
b0b33846
VS
2928 /*
2929 * Must trick the second common lane into life.
2930 * Otherwise we can't even access the PLL.
2931 */
2932 if (ch == DPIO_CH0 && pipe == PIPE_B)
2933 dport->release_cl2_override =
2934 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2935
e0fce78f
VS
2936 chv_phy_powergate_lanes(encoder, true, lane_mask);
2937
a580516d 2938 mutex_lock(&dev_priv->sb_lock);
9197c88b 2939
b9e5ac3c
VS
2940 /* program left/right clock distribution */
2941 if (pipe != PIPE_B) {
2942 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2943 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2944 if (ch == DPIO_CH0)
2945 val |= CHV_BUFLEFTENA1_FORCE;
2946 if (ch == DPIO_CH1)
2947 val |= CHV_BUFRIGHTENA1_FORCE;
2948 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2949 } else {
2950 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2951 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2952 if (ch == DPIO_CH0)
2953 val |= CHV_BUFLEFTENA2_FORCE;
2954 if (ch == DPIO_CH1)
2955 val |= CHV_BUFRIGHTENA2_FORCE;
2956 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2957 }
2958
9197c88b
VS
2959 /* program clock channel usage */
2960 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2961 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2962 if (pipe != PIPE_B)
2963 val &= ~CHV_PCS_USEDCLKCHANNEL;
2964 else
2965 val |= CHV_PCS_USEDCLKCHANNEL;
2966 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2967
e0fce78f
VS
2968 if (intel_crtc->config->lane_count > 2) {
2969 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2970 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2971 if (pipe != PIPE_B)
2972 val &= ~CHV_PCS_USEDCLKCHANNEL;
2973 else
2974 val |= CHV_PCS_USEDCLKCHANNEL;
2975 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2976 }
9197c88b
VS
2977
2978 /*
2979	 * This is a bit weird since generally CL
2980 * matches the pipe, but here we need to
2981 * pick the CL based on the port.
2982 */
2983 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2984 if (pipe != PIPE_B)
2985 val &= ~CHV_CMN_USEDCLKCHANNEL;
2986 else
2987 val |= CHV_CMN_USEDCLKCHANNEL;
2988 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2989
a580516d 2990 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2991}
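chv_dp_pre_pll_enable() above has to force the second common lane (CL2) on when DPIO channel 0 is paired with pipe B, since otherwise the PLL would not even be accessible; the override is then released in chv_pre_enable_dp() once the port is running. A standalone sketch of just that condition; the EX_* enum values and function name are invented for the example.

#include <stdbool.h>
#include <stdio.h>

enum example_dpio_channel { EX_DPIO_CH0, EX_DPIO_CH1 };
enum example_pipe { EX_PIPE_A, EX_PIPE_B, EX_PIPE_C };

/* mirrors the "ch == DPIO_CH0 && pipe == PIPE_B" check in the code above */
static bool need_cl2_override(enum example_dpio_channel ch, enum example_pipe pipe)
{
	return ch == EX_DPIO_CH0 && pipe == EX_PIPE_B;
}

int main(void)
{
	printf("CH0 + pipe B needs CL2 override: %d\n",
	       need_cl2_override(EX_DPIO_CH0, EX_PIPE_B));
	printf("CH0 + pipe A needs CL2 override: %d\n",
	       need_cl2_override(EX_DPIO_CH0, EX_PIPE_A));
	return 0;
}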
2992
d6db995f
VS
2993static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2994{
2995 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2996 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2997 u32 val;
2998
2999 mutex_lock(&dev_priv->sb_lock);
3000
3001 /* disable left/right clock distribution */
3002 if (pipe != PIPE_B) {
3003 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3004 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3005 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3006 } else {
3007 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3008 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3009 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3010 }
3011
3012 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3013
b0b33846
VS
3014 /*
3015 * Leave the power down bit cleared for at least one
3016	 * lane so that chv_phy_powergate_ch() will power
3017 * on something when the channel is otherwise unused.
3018 * When the port is off and the override is removed
3019 * the lanes power down anyway, so otherwise it doesn't
3020 * really matter what the state of power down bits is
3021 * after this.
3022 */
e0fce78f 3023 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3024}
3025
a4fc5ed6 3026/*
df0c237d
JB
3027 * Native read with retry for link status and receiver capability reads for
3028 * cases where the sink may still be asleep.
9d1a1031
JN
3029 *
3030 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3031 * supposed to retry 3 times per the spec.
a4fc5ed6 3032 */
9d1a1031
JN
3033static ssize_t
3034intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3035 void *buffer, size_t size)
a4fc5ed6 3036{
9d1a1031
JN
3037 ssize_t ret;
3038 int i;
61da5fab 3039
f6a19066
VS
3040 /*
3041	 * Sometimes we just get the same incorrect byte repeated
3042	 * over the entire buffer. Doing just one throw-away read
3043 * initially seems to "solve" it.
3044 */
3045 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3046
61da5fab 3047 for (i = 0; i < 3; i++) {
9d1a1031
JN
3048 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3049 if (ret == size)
3050 return ret;
61da5fab
JB
3051 msleep(1);
3052 }
a4fc5ed6 3053
9d1a1031 3054 return ret;
a4fc5ed6
KP
3055}
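intel_dp_dpcd_read_wake() above wraps a DPCD read with one throw-away read and up to three retries, so a sink that is still waking up gets a chance to respond. The standalone model below captures just that retry shape; fake_dpcd_read() is invented for the example, whereas the real code calls drm_dp_dpcd_read() and sleeps 1 ms between attempts.

#include <stddef.h>
#include <stdio.h>

static int sink_awake;	/* pretend the sink starts out asleep */

/* invented stand-in for drm_dp_dpcd_read(): short reads until awake */
static size_t fake_dpcd_read(void *buf, size_t size)
{
	(void)buf;
	if (!sink_awake) {
		sink_awake = 1;
		return 0;
	}
	return size;
}

static size_t dpcd_read_wake(void *buf, size_t size)
{
	size_t ret = 0;
	int i;

	fake_dpcd_read(buf, 1);		/* throw-away read, as in the driver */

	for (i = 0; i < 3; i++) {
		ret = fake_dpcd_read(buf, size);
		if (ret == size)
			break;
		/* the driver msleep(1)s here before retrying */
	}
	return ret;
}

int main(void)
{
	char buf[6];

	printf("read %zu of %zu bytes\n",
	       dpcd_read_wake(buf, sizeof(buf)), sizeof(buf));
	return 0;
}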
3056
3057/*
3058 * Fetch AUX CH registers 0x202 - 0x207 which contain
3059 * link status information
3060 */
3061static bool
93f62dad 3062intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3063{
9d1a1031
JN
3064 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3065 DP_LANE0_1_STATUS,
3066 link_status,
3067 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3068}
3069
1100244e 3070/* These are source-specific values. */
a4fc5ed6 3071static uint8_t
1a2eb460 3072intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3073{
30add22d 3074 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3075 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3076 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3077
9314726b
VK
3078 if (IS_BROXTON(dev))
3079 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3080 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3081 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3082 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3083 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 3084 } else if (IS_VALLEYVIEW(dev))
bd60018a 3085 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3086 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3087 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3088 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3089 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3090 else
bd60018a 3091 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3092}
3093
3094static uint8_t
3095intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3096{
30add22d 3097 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3098 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3099
5a9d1f1a
DL
3100 if (INTEL_INFO(dev)->gen >= 9) {
3101 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3102 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3103 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3105 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3107 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3108 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3109 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3110 default:
3111 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3112 }
3113 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3114 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3116 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3117 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3118 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3122 default:
bd60018a 3123 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3124 }
e2fa6fba
P
3125 } else if (IS_VALLEYVIEW(dev)) {
3126 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3130 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3131 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3132 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3134 default:
bd60018a 3135 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3136 }
bc7d38a4 3137 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3138 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3142 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3143 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3144 default:
bd60018a 3145 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3146 }
3147 } else {
3148 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3149 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3150 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3152 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3153 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3154 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3156 default:
bd60018a 3157 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3158 }
a4fc5ed6
KP
3159 }
3160}
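For gen9+ the function above caps pre-emphasis so that, roughly, the voltage swing level plus the maximum pre-emphasis level adds up to 3. A standalone sketch of that branch only, with plain integers standing in for the DP_TRAIN_VOLTAGE_SWING_LEVEL_* / DP_TRAIN_PRE_EMPH_LEVEL_* values and an invented function name.

#include <stdio.h>

/* models only the gen >= 9 branch of intel_dp_pre_emphasis_max() */
static int max_pre_emphasis_for_vswing_gen9(int vswing_level)
{
	switch (vswing_level) {
	case 0: return 3;
	case 1: return 2;
	case 2: return 1;
	case 3: return 0;
	default: return 0;
	}
}

int main(void)
{
	int v;

	for (v = 0; v <= 3; v++)
		printf("vswing level %d -> max pre-emphasis level %d\n",
		       v, max_pre_emphasis_for_vswing_gen9(v));
	return 0;
}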
3161
5829975c 3162static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3163{
3164 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3165 struct drm_i915_private *dev_priv = dev->dev_private;
3166 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3167 struct intel_crtc *intel_crtc =
3168 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3169 unsigned long demph_reg_value, preemph_reg_value,
3170 uniqtranscale_reg_value;
3171 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3172 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3173 int pipe = intel_crtc->pipe;
e2fa6fba
P
3174
3175 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3176 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3177 preemph_reg_value = 0x0004000;
3178 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3180 demph_reg_value = 0x2B405555;
3181 uniqtranscale_reg_value = 0x552AB83A;
3182 break;
bd60018a 3183 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3184 demph_reg_value = 0x2B404040;
3185 uniqtranscale_reg_value = 0x5548B83A;
3186 break;
bd60018a 3187 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3188 demph_reg_value = 0x2B245555;
3189 uniqtranscale_reg_value = 0x5560B83A;
3190 break;
bd60018a 3191 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3192 demph_reg_value = 0x2B405555;
3193 uniqtranscale_reg_value = 0x5598DA3A;
3194 break;
3195 default:
3196 return 0;
3197 }
3198 break;
bd60018a 3199 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3200 preemph_reg_value = 0x0002000;
3201 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3203 demph_reg_value = 0x2B404040;
3204 uniqtranscale_reg_value = 0x5552B83A;
3205 break;
bd60018a 3206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3207 demph_reg_value = 0x2B404848;
3208 uniqtranscale_reg_value = 0x5580B83A;
3209 break;
bd60018a 3210 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3211 demph_reg_value = 0x2B404040;
3212 uniqtranscale_reg_value = 0x55ADDA3A;
3213 break;
3214 default:
3215 return 0;
3216 }
3217 break;
bd60018a 3218 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3219 preemph_reg_value = 0x0000000;
3220 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3222 demph_reg_value = 0x2B305555;
3223 uniqtranscale_reg_value = 0x5570B83A;
3224 break;
bd60018a 3225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3226 demph_reg_value = 0x2B2B4040;
3227 uniqtranscale_reg_value = 0x55ADDA3A;
3228 break;
3229 default:
3230 return 0;
3231 }
3232 break;
bd60018a 3233 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3234 preemph_reg_value = 0x0006000;
3235 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3236 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3237 demph_reg_value = 0x1B405555;
3238 uniqtranscale_reg_value = 0x55ADDA3A;
3239 break;
3240 default:
3241 return 0;
3242 }
3243 break;
3244 default:
3245 return 0;
3246 }
3247
a580516d 3248 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3249 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3250 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3251 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3252 uniqtranscale_reg_value);
ab3c759a
CML
3253 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3254 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3255 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3256 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3257 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3258
3259 return 0;
3260}
3261
67fa24b4
VS
3262static bool chv_need_uniq_trans_scale(uint8_t train_set)
3263{
3264 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3265 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3266}
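/*
 * Example of the masking above: a train_set byte of
 * (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0) is the
 * only combination for which chv_signal_levels() below enables the
 * unique transition scale.
 */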
3267
5829975c 3268static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3269{
3270 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3271 struct drm_i915_private *dev_priv = dev->dev_private;
3272 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3273 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3274 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3275 uint8_t train_set = intel_dp->train_set[0];
3276 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3277 enum pipe pipe = intel_crtc->pipe;
3278 int i;
e4a1d846
CML
3279
3280 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3281 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3282 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3284 deemph_reg_value = 128;
3285 margin_reg_value = 52;
3286 break;
bd60018a 3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3288 deemph_reg_value = 128;
3289 margin_reg_value = 77;
3290 break;
bd60018a 3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3292 deemph_reg_value = 128;
3293 margin_reg_value = 102;
3294 break;
bd60018a 3295 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3296 deemph_reg_value = 128;
3297 margin_reg_value = 154;
3298 /* FIXME extra to set for 1200 */
3299 break;
3300 default:
3301 return 0;
3302 }
3303 break;
bd60018a 3304 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3305 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3307 deemph_reg_value = 85;
3308 margin_reg_value = 78;
3309 break;
bd60018a 3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3311 deemph_reg_value = 85;
3312 margin_reg_value = 116;
3313 break;
bd60018a 3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3315 deemph_reg_value = 85;
3316 margin_reg_value = 154;
3317 break;
3318 default:
3319 return 0;
3320 }
3321 break;
bd60018a 3322 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3323 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3325 deemph_reg_value = 64;
3326 margin_reg_value = 104;
3327 break;
bd60018a 3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3329 deemph_reg_value = 64;
3330 margin_reg_value = 154;
3331 break;
3332 default:
3333 return 0;
3334 }
3335 break;
bd60018a 3336 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3337 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3339 deemph_reg_value = 43;
3340 margin_reg_value = 154;
3341 break;
3342 default:
3343 return 0;
3344 }
3345 break;
3346 default:
3347 return 0;
3348 }
3349
a580516d 3350 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3351
3352 /* Clear calc init */
1966e59e
VS
3353 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3354 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3355 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3356 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3357 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3358
e0fce78f
VS
3359 if (intel_crtc->config->lane_count > 2) {
3360 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3361 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3362 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3363 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3365 }
e4a1d846 3366
a02ef3c7
VS
3367 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3368 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3369 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3370 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3371
e0fce78f
VS
3372 if (intel_crtc->config->lane_count > 2) {
3373 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3374 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3375 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3376 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3377 }
a02ef3c7 3378
e4a1d846 3379 /* Program swing deemph */
e0fce78f 3380 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3381 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3382 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3383 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3384 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3385 }
e4a1d846
CML
3386
3387 /* Program swing margin */
e0fce78f 3388 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3389 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3390
1fb44505
VS
3391 val &= ~DPIO_SWING_MARGIN000_MASK;
3392 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3393
3394 /*
3395 * Supposedly this value shouldn't matter when unique transition
3396 * scale is disabled, but in fact it does matter. Let's just
3397 * always program the same value and hope it's OK.
3398 */
3399 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3400 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3401
f72df8db
VS
3402 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3403 }
e4a1d846 3404
67fa24b4
VS
3405 /*
 3406 * The documentation says bit 27 should be set for ch0 and bit 26
 3407 * for ch1, but that might be a typo in the doc.
3408 * For now, for this unique transition scale selection, set bit
3409 * 27 for ch0 and ch1.
3410 */
e0fce78f 3411 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3412 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3413 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3414 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3415 else
3416 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3417 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3418 }
3419
3420 /* Start swing calculation */
1966e59e
VS
3421 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3422 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3423 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3424
e0fce78f
VS
3425 if (intel_crtc->config->lane_count > 2) {
3426 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3427 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3428 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3429 }
e4a1d846
CML
3430
3431 /* LRC Bypass */
3432 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3433 val |= DPIO_LRC_BYPASS;
3434 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3435
a580516d 3436 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3437
3438 return 0;
3439}
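/*
 * DPIO sequence recap for chv_signal_levels(): clear the swing
 * calculation init bits, zero the TX margin fields, program the
 * per-lane de-emphasis (CHV_TX_DW4) and swing margin (CHV_TX_DW2)
 * values chosen from the train_set tables, apply the unique transition
 * scale selection (CHV_TX_DW3), then restart the swing calculation and
 * set LRC bypass, all under sb_lock.
 */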
3440
a4fc5ed6 3441static void
0301b3ac
JN
3442intel_get_adjust_train(struct intel_dp *intel_dp,
3443 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3444{
3445 uint8_t v = 0;
3446 uint8_t p = 0;
3447 int lane;
1a2eb460
KP
3448 uint8_t voltage_max;
3449 uint8_t preemph_max;
a4fc5ed6 3450
901c2daf 3451 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3452 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3453 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3454
3455 if (this_v > v)
3456 v = this_v;
3457 if (this_p > p)
3458 p = this_p;
3459 }
3460
1a2eb460 3461 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3462 if (v >= voltage_max)
3463 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3464
1a2eb460
KP
3465 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3466 if (p >= preemph_max)
3467 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3468
3469 for (lane = 0; lane < 4; lane++)
33a34e4e 3470 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3471}
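/*
 * Worked example: if the highest per-lane request in link_status is
 * voltage level 2 with pre-emphasis level 1 and the platform maximum
 * voltage is level 2, every train_set entry becomes
 * (DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_MAX_SWING_REACHED |
 *  DP_TRAIN_PRE_EMPH_LEVEL_1), plus DP_TRAIN_MAX_PRE_EMPHASIS_REACHED
 * when level 1 is also the pre-emphasis cap for that swing.
 */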
3472
3473static uint32_t
5829975c 3474gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3475{
3cf2efb1 3476 uint32_t signal_levels = 0;
a4fc5ed6 3477
3cf2efb1 3478 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3479 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3480 default:
3481 signal_levels |= DP_VOLTAGE_0_4;
3482 break;
bd60018a 3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3484 signal_levels |= DP_VOLTAGE_0_6;
3485 break;
bd60018a 3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3487 signal_levels |= DP_VOLTAGE_0_8;
3488 break;
bd60018a 3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3490 signal_levels |= DP_VOLTAGE_1_2;
3491 break;
3492 }
3cf2efb1 3493 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3494 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3495 default:
3496 signal_levels |= DP_PRE_EMPHASIS_0;
3497 break;
bd60018a 3498 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3499 signal_levels |= DP_PRE_EMPHASIS_3_5;
3500 break;
bd60018a 3501 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3502 signal_levels |= DP_PRE_EMPHASIS_6;
3503 break;
bd60018a 3504 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3505 signal_levels |= DP_PRE_EMPHASIS_9_5;
3506 break;
3507 }
3508 return signal_levels;
3509}
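/*
 * For example, a train_set of (DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 * DP_TRAIN_PRE_EMPH_LEVEL_2) maps to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6;
 * unknown swing values fall back to 0.4V and unknown pre-emphasis
 * values to 0 dB via the default cases.
 */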
3510
e3421a18
ZW
3511/* Gen6's DP voltage swing and pre-emphasis control */
3512static uint32_t
5829975c 3513gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3514{
3c5a62b5
YL
3515 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3516 DP_TRAIN_PRE_EMPHASIS_MASK);
3517 switch (signal_levels) {
bd60018a
SJ
3518 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3519 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3520 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3521 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3522 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3523 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3524 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3525 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3526 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3527 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3528 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3529 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3530 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3531 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3532 default:
3c5a62b5
YL
3533 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3534 "0x%x\n", signal_levels);
3535 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3536 }
3537}
3538
1a2eb460
KP
3539/* Gen7's DP voltage swing and pre-emphasis control */
3540static uint32_t
5829975c 3541gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3542{
3543 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3544 DP_TRAIN_PRE_EMPHASIS_MASK);
3545 switch (signal_levels) {
bd60018a 3546 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3547 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3548 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3549 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3550 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3551 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3552
bd60018a 3553 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3554 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3555 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3556 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3557
bd60018a 3558 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3559 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3560 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3561 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3562
3563 default:
3564 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3565 "0x%x\n", signal_levels);
3566 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3567 }
3568}
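/*
 * As with the gen6 variant, only the listed combinations are valid:
 * e.g. (DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1)
 * selects EDP_LINK_TRAIN_400MV_3_5DB_IVB, while anything unlisted is
 * logged and falls back to EDP_LINK_TRAIN_500MV_0DB_IVB.
 */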
3569
f0a3424e
PZ
3570/* Properly updates "DP" with the correct signal levels. */
3571static void
3572intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3573{
3574 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3575 enum port port = intel_dig_port->port;
f0a3424e 3576 struct drm_device *dev = intel_dig_port->base.base.dev;
f8896f5d 3577 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3578 uint8_t train_set = intel_dp->train_set[0];
3579
f8896f5d
DW
3580 if (HAS_DDI(dev)) {
3581 signal_levels = ddi_signal_levels(intel_dp);
3582
3583 if (IS_BROXTON(dev))
3584 signal_levels = 0;
3585 else
3586 mask = DDI_BUF_EMP_MASK;
e4a1d846 3587 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3588 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3589 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3590 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3591 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3592 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3593 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3594 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3595 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3596 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3597 } else {
5829975c 3598 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3599 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3600 }
3601
96fb9f9b
VK
3602 if (mask)
3603 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3604
3605 DRM_DEBUG_KMS("Using vswing level %d\n",
3606 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3607 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3608 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3609 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3610
3611 *DP = (*DP & ~mask) | signal_levels;
3612}
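/*
 * Selection recap: DDI platforms take ddi_signal_levels() (forced to 0,
 * with no mask, on BXT), CHV and VLV program the levels directly via
 * DPIO and contribute nothing to the port register, and gen7/gen6 eDP
 * on port A plus the gen4 fallback return register bits that are merged
 * into *DP under the matching mask.
 */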
3613
a4fc5ed6 3614static bool
ea5b213a 3615intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3616 uint32_t *DP,
58e10eb9 3617 uint8_t dp_train_pat)
a4fc5ed6 3618{
174edf1f 3619 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3620 struct drm_i915_private *dev_priv =
3621 to_i915(intel_dig_port->base.base.dev);
2cdfe6c8
JN
3622 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3623 int ret, len;
a4fc5ed6 3624
7b13b58a 3625 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3626
70aff66c 3627 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3628 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3629
2cdfe6c8
JN
3630 buf[0] = dp_train_pat;
3631 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3632 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3633 /* don't write DP_TRAINING_LANEx_SET on disable */
3634 len = 1;
3635 } else {
3636 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
901c2daf
VS
3637 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3638 len = intel_dp->lane_count + 1;
47ea7542 3639 }
a4fc5ed6 3640
9d1a1031
JN
3641 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3642 buf, len);
2cdfe6c8
JN
3643
3644 return ret == len;
a4fc5ed6
KP
3645}
3646
70aff66c
JN
3647static bool
3648intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3649 uint8_t dp_train_pat)
3650{
4e96c977
MK
3651 if (!intel_dp->train_set_valid)
3652 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3653 intel_dp_set_signal_levels(intel_dp, DP);
3654 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3655}
3656
3657static bool
3658intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3659 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3660{
3661 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3662 struct drm_i915_private *dev_priv =
3663 to_i915(intel_dig_port->base.base.dev);
70aff66c
JN
3664 int ret;
3665
3666 intel_get_adjust_train(intel_dp, link_status);
3667 intel_dp_set_signal_levels(intel_dp, DP);
3668
3669 I915_WRITE(intel_dp->output_reg, *DP);
3670 POSTING_READ(intel_dp->output_reg);
3671
9d1a1031 3672 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
901c2daf 3673 intel_dp->train_set, intel_dp->lane_count);
70aff66c 3674
901c2daf 3675 return ret == intel_dp->lane_count;
70aff66c
JN
3676}
3677
3ab9c637
ID
3678static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3679{
3680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3681 struct drm_device *dev = intel_dig_port->base.base.dev;
3682 struct drm_i915_private *dev_priv = dev->dev_private;
3683 enum port port = intel_dig_port->port;
3684 uint32_t val;
3685
3686 if (!HAS_DDI(dev))
3687 return;
3688
3689 val = I915_READ(DP_TP_CTL(port));
3690 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3691 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3692 I915_WRITE(DP_TP_CTL(port), val);
3693
3694 /*
 3695 * On PORT_A we can have only eDP in SST mode. There, the only reason
 3696 * we need to set idle transmission mode is to work around a HW issue
 3697 * where we enable the pipe while not in idle link-training mode.
 3698 * In this case there is a requirement to wait for a minimum number of
 3699 * idle patterns to be sent.
3700 */
3701 if (port == PORT_A)
3702 return;
3703
3704 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3705 1))
3706 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3707}
3708
33a34e4e 3709/* Enable corresponding port and start training pattern 1 */
c19b0669 3710void
33a34e4e 3711intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3712{
da63a9f2 3713 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3714 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3715 int i;
3716 uint8_t voltage;
cdb0e95b 3717 int voltage_tries, loop_tries;
ea5b213a 3718 uint32_t DP = intel_dp->DP;
6aba5b6c 3719 uint8_t link_config[2];
04a60f9f 3720 uint8_t link_bw, rate_select;
a4fc5ed6 3721
affa9354 3722 if (HAS_DDI(dev))
c19b0669
PZ
3723 intel_ddi_prepare_link_retrain(encoder);
3724
901c2daf 3725 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
04a60f9f
VS
3726 &link_bw, &rate_select);
3727
3cf2efb1 3728 /* Write the link configuration data */
04a60f9f 3729 link_config[0] = link_bw;
901c2daf 3730 link_config[1] = intel_dp->lane_count;
6aba5b6c
JN
3731 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3732 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3733 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3734 if (intel_dp->num_sink_rates)
a8f3ef61 3735 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
04a60f9f 3736 &rate_select, 1);
6aba5b6c
JN
3737
3738 link_config[0] = 0;
3739 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3740 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3741
3742 DP |= DP_PORT_EN;
1a2eb460 3743
70aff66c
JN
3744 /* clock recovery */
3745 if (!intel_dp_reset_link_train(intel_dp, &DP,
3746 DP_TRAINING_PATTERN_1 |
3747 DP_LINK_SCRAMBLING_DISABLE)) {
3748 DRM_ERROR("failed to enable link training\n");
3749 return;
3750 }
3751
a4fc5ed6 3752 voltage = 0xff;
cdb0e95b
KP
3753 voltage_tries = 0;
3754 loop_tries = 0;
a4fc5ed6 3755 for (;;) {
70aff66c 3756 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3757
a7c9655f 3758 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3759 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3760 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3761 break;
93f62dad 3762 }
a4fc5ed6 3763
901c2daf 3764 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3765 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3766 break;
3767 }
3768
4e96c977
MK
3769 /*
3770 * if we used previously trained voltage and pre-emphasis values
3771 * and we don't get clock recovery, reset link training values
3772 */
3773 if (intel_dp->train_set_valid) {
 3774 DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3775 /* clear the flag as we are not reusing train set */
3776 intel_dp->train_set_valid = false;
3777 if (!intel_dp_reset_link_train(intel_dp, &DP,
3778 DP_TRAINING_PATTERN_1 |
3779 DP_LINK_SCRAMBLING_DISABLE)) {
3780 DRM_ERROR("failed to enable link training\n");
3781 return;
3782 }
3783 continue;
3784 }
3785
3cf2efb1 3786 /* Check to see if we've tried the max voltage */
901c2daf 3787 for (i = 0; i < intel_dp->lane_count; i++)
3cf2efb1 3788 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3789 break;
901c2daf 3790 if (i == intel_dp->lane_count) {
b06fbda3
DV
3791 ++loop_tries;
3792 if (loop_tries == 5) {
3def84b3 3793 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3794 break;
3795 }
70aff66c
JN
3796 intel_dp_reset_link_train(intel_dp, &DP,
3797 DP_TRAINING_PATTERN_1 |
3798 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3799 voltage_tries = 0;
3800 continue;
3801 }
a4fc5ed6 3802
3cf2efb1 3803 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3804 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3805 ++voltage_tries;
b06fbda3 3806 if (voltage_tries == 5) {
3def84b3 3807 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3808 break;
3809 }
3810 } else
3811 voltage_tries = 0;
3812 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3813
70aff66c
JN
3814 /* Update training set as requested by target */
3815 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3816 DRM_ERROR("failed to update link training\n");
3817 break;
3818 }
a4fc5ed6
KP
3819 }
3820
33a34e4e
JB
3821 intel_dp->DP = DP;
3822}
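/*
 * Clock recovery recap: training pattern 1 is transmitted with
 * scrambling disabled and the link status re-read after each
 * drm_dp_link_train_clock_recovery_delay(). The loop exits on clock
 * recovery success, after five full restarts once all lanes report max
 * swing, or after retrying the same voltage five times; otherwise the
 * swing/pre-emphasis are updated from the sink's adjust requests and
 * the loop continues.
 */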
3823
c19b0669 3824void
33a34e4e
JB
3825intel_dp_complete_link_train(struct intel_dp *intel_dp)
3826{
33a34e4e 3827 bool channel_eq = false;
37f80975 3828 int tries, cr_tries;
33a34e4e 3829 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3830 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3831
a79b8165 3832 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
901c2daf 3833 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
06ea66b6 3834 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3835
a4fc5ed6 3836 /* channel equalization */
70aff66c 3837 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3838 training_pattern |
70aff66c
JN
3839 DP_LINK_SCRAMBLING_DISABLE)) {
3840 DRM_ERROR("failed to start channel equalization\n");
3841 return;
3842 }
3843
a4fc5ed6 3844 tries = 0;
37f80975 3845 cr_tries = 0;
a4fc5ed6
KP
3846 channel_eq = false;
3847 for (;;) {
70aff66c 3848 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3849
37f80975
JB
3850 if (cr_tries > 5) {
3851 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3852 break;
3853 }
3854
a7c9655f 3855 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3856 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3857 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3858 break;
70aff66c 3859 }
a4fc5ed6 3860
37f80975 3861 /* Make sure clock is still ok */
90a6b7b0 3862 if (!drm_dp_clock_recovery_ok(link_status,
901c2daf 3863 intel_dp->lane_count)) {
4e96c977 3864 intel_dp->train_set_valid = false;
37f80975 3865 intel_dp_start_link_train(intel_dp);
70aff66c 3866 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3867 training_pattern |
70aff66c 3868 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3869 cr_tries++;
3870 continue;
3871 }
3872
90a6b7b0 3873 if (drm_dp_channel_eq_ok(link_status,
901c2daf 3874 intel_dp->lane_count)) {
3cf2efb1
CW
3875 channel_eq = true;
3876 break;
3877 }
a4fc5ed6 3878
37f80975
JB
3879 /* Try 5 times, then try clock recovery if that fails */
3880 if (tries > 5) {
4e96c977 3881 intel_dp->train_set_valid = false;
37f80975 3882 intel_dp_start_link_train(intel_dp);
70aff66c 3883 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3884 training_pattern |
70aff66c 3885 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3886 tries = 0;
3887 cr_tries++;
3888 continue;
3889 }
a4fc5ed6 3890
70aff66c
JN
3891 /* Update training set as requested by target */
3892 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3893 DRM_ERROR("failed to update link training\n");
3894 break;
3895 }
3cf2efb1 3896 ++tries;
869184a6 3897 }
3cf2efb1 3898
3ab9c637
ID
3899 intel_dp_set_idle_link_train(intel_dp);
3900
3901 intel_dp->DP = DP;
3902
4e96c977 3903 if (channel_eq) {
5fa836a9 3904 intel_dp->train_set_valid = true;
07f42258 3905 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3906 }
3ab9c637
ID
3907}
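/*
 * Channel equalization recap: pattern 2 (or pattern 3 for HBR2/TPS3
 * capable sinks) is transmitted and clock recovery is re-checked every
 * pass. Losing clock recovery or exceeding five equalization tries
 * restarts the full training via intel_dp_start_link_train(), and more
 * than five such restarts (cr_tries) aborts. On success the port is put
 * into idle transmission and train_set_valid is recorded for reuse.
 */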
3908
3909void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3910{
70aff66c 3911 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3912 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3913}
3914
3915static void
ea5b213a 3916intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3917{
da63a9f2 3918 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3919 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3920 enum port port = intel_dig_port->port;
da63a9f2 3921 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3922 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3923 uint32_t DP = intel_dp->DP;
a4fc5ed6 3924
bc76e320 3925 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3926 return;
3927
0c33d8d7 3928 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3929 return;
3930
28c97730 3931 DRM_DEBUG_KMS("\n");
32f9d658 3932
39e5fa88
VS
3933 if ((IS_GEN7(dev) && port == PORT_A) ||
3934 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3935 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3936 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3937 } else {
aad3d14d
VS
3938 if (IS_CHERRYVIEW(dev))
3939 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3940 else
3941 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3942 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3943 }
1612c8bd 3944 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3945 POSTING_READ(intel_dp->output_reg);
5eb08b69 3946
1612c8bd
VS
3947 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3948 I915_WRITE(intel_dp->output_reg, DP);
3949 POSTING_READ(intel_dp->output_reg);
3950
3951 /*
3952 * HW workaround for IBX, we need to move the port
3953 * to transcoder A after disabling it to allow the
3954 * matching HDMI port to be enabled on transcoder A.
3955 */
3956 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3957 /* always enable with pattern 1 (as per spec) */
3958 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3959 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3960 I915_WRITE(intel_dp->output_reg, DP);
3961 POSTING_READ(intel_dp->output_reg);
3962
3963 DP &= ~DP_PORT_EN;
5bddd17f 3964 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3965 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3966 }
3967
f01eca2e 3968 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3969}
3970
26d61aad
KP
3971static bool
3972intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3973{
a031d709
RV
3974 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3975 struct drm_device *dev = dig_port->base.base.dev;
3976 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3977 uint8_t rev;
a031d709 3978
9d1a1031
JN
3979 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3980 sizeof(intel_dp->dpcd)) < 0)
edb39244 3981 return false; /* aux transfer failed */
92fd8fd1 3982
a8e98153 3983 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3984
edb39244
AJ
3985 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3986 return false; /* DPCD not present */
3987
2293bb5c
SK
3988 /* Check if the panel supports PSR */
3989 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3990 if (is_edp(intel_dp)) {
9d1a1031
JN
3991 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3992 intel_dp->psr_dpcd,
3993 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3994 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3995 dev_priv->psr.sink_support = true;
50003939 3996 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3997 }
474d1ec4
SJ
3998
3999 if (INTEL_INFO(dev)->gen >= 9 &&
4000 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4001 uint8_t frame_sync_cap;
4002
4003 dev_priv->psr.sink_support = true;
4004 intel_dp_dpcd_read_wake(&intel_dp->aux,
4005 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4006 &frame_sync_cap, 1);
4007 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4008 /* PSR2 needs frame sync as well */
4009 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
 4010 DRM_DEBUG_KMS("PSR2 %s on sink\n",
4011 dev_priv->psr.psr2_support ? "supported" : "not supported");
4012 }
50003939
JN
4013 }
4014
7809a611 4015 /* Training Pattern 3 support, both source and sink */
06ea66b6 4016 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
4017 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
4018 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 4019 intel_dp->use_tps3 = true;
f8d8a672 4020 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
4021 } else
4022 intel_dp->use_tps3 = false;
4023
fc0f8e25
SJ
4024 /* Intermediate frequency support */
4025 if (is_edp(intel_dp) &&
4026 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4027 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 4028 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 4029 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
4030 int i;
4031
fc0f8e25
SJ
4032 intel_dp_dpcd_read_wake(&intel_dp->aux,
4033 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
4034 sink_rates,
4035 sizeof(sink_rates));
ea2d8a42 4036
94ca719e
VS
4037 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4038 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
4039
4040 if (val == 0)
4041 break;
4042
af77b974
SJ
 4043 /* Value read is in units of 200 kHz, while the drm clock is saved in deca-kHz */
4044 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 4045 }
94ca719e 4046 intel_dp->num_sink_rates = i;
fc0f8e25 4047 }
0336400e
VS
4048
4049 intel_dp_print_rates(intel_dp);
4050
edb39244
AJ
4051 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4052 DP_DWN_STRM_PORT_PRESENT))
4053 return true; /* native DP sink */
4054
4055 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4056 return true; /* no per-port downstream info */
4057
9d1a1031
JN
4058 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4059 intel_dp->downstream_ports,
4060 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
4061 return false; /* downstream port status fetch failed */
4062
4063 return true;
92fd8fd1
KP
4064}
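/*
 * Worked example for the sink rate parsing above: each
 * DP_SUPPORTED_LINK_RATES entry is scaled as (val * 200) / 10, so a raw
 * little-endian value of 8100 is stored as 162000 in sink_rates[], the
 * same units used for intel_dp->link_rate elsewhere in this file.
 */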
4065
0d198328
AJ
4066static void
4067intel_dp_probe_oui(struct intel_dp *intel_dp)
4068{
4069 u8 buf[3];
4070
4071 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4072 return;
4073
9d1a1031 4074 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4075 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4076 buf[0], buf[1], buf[2]);
4077
9d1a1031 4078 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4079 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4080 buf[0], buf[1], buf[2]);
4081}
4082
0e32b39c
DA
4083static bool
4084intel_dp_probe_mst(struct intel_dp *intel_dp)
4085{
4086 u8 buf[1];
4087
4088 if (!intel_dp->can_mst)
4089 return false;
4090
4091 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4092 return false;
4093
0e32b39c
DA
4094 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4095 if (buf[0] & DP_MST_CAP) {
4096 DRM_DEBUG_KMS("Sink is MST capable\n");
4097 intel_dp->is_mst = true;
4098 } else {
4099 DRM_DEBUG_KMS("Sink is not MST capable\n");
4100 intel_dp->is_mst = false;
4101 }
4102 }
0e32b39c
DA
4103
4104 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4105 return intel_dp->is_mst;
4106}
4107
e5a1cab5 4108static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4109{
082dcc7c
RV
4110 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4111 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4112 u8 buf;
e5a1cab5 4113 int ret = 0;
d2e216d0 4114
082dcc7c
RV
4115 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4116 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4117 ret = -EIO;
4118 goto out;
4373f0f2
PZ
4119 }
4120
082dcc7c 4121 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4122 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4123 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4124 ret = -EIO;
4125 goto out;
4126 }
d2e216d0 4127
621d4c76 4128 intel_dp->sink_crc.started = false;
e5a1cab5 4129 out:
082dcc7c 4130 hsw_enable_ips(intel_crtc);
e5a1cab5 4131 return ret;
082dcc7c
RV
4132}
4133
4134static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4135{
4136 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4137 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4138 u8 buf;
e5a1cab5
RV
4139 int ret;
4140
621d4c76 4141 if (intel_dp->sink_crc.started) {
e5a1cab5
RV
4142 ret = intel_dp_sink_crc_stop(intel_dp);
4143 if (ret)
4144 return ret;
4145 }
082dcc7c
RV
4146
4147 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4148 return -EIO;
4149
4150 if (!(buf & DP_TEST_CRC_SUPPORTED))
4151 return -ENOTTY;
4152
621d4c76
RV
4153 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4154
082dcc7c
RV
4155 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4156 return -EIO;
4157
4158 hsw_disable_ips(intel_crtc);
1dda5f93 4159
9d1a1031 4160 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4161 buf | DP_TEST_SINK_START) < 0) {
4162 hsw_enable_ips(intel_crtc);
4163 return -EIO;
4373f0f2
PZ
4164 }
4165
621d4c76 4166 intel_dp->sink_crc.started = true;
082dcc7c
RV
4167 return 0;
4168}
4169
4170int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4171{
4172 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4173 struct drm_device *dev = dig_port->base.base.dev;
4174 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4175 u8 buf;
621d4c76 4176 int count, ret;
082dcc7c 4177 int attempts = 6;
aabc95dc 4178 bool old_equal_new;
082dcc7c
RV
4179
4180 ret = intel_dp_sink_crc_start(intel_dp);
4181 if (ret)
4182 return ret;
4183
ad9dc91b 4184 do {
621d4c76
RV
4185 intel_wait_for_vblank(dev, intel_crtc->pipe);
4186
1dda5f93 4187 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4188 DP_TEST_SINK_MISC, &buf) < 0) {
4189 ret = -EIO;
afe0d67e 4190 goto stop;
4373f0f2 4191 }
621d4c76 4192 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4193
621d4c76
RV
4194 /*
4195 * Count might be reset during the loop. In this case
4196 * last known count needs to be reset as well.
4197 */
4198 if (count == 0)
4199 intel_dp->sink_crc.last_count = 0;
4200
4201 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4202 ret = -EIO;
4203 goto stop;
4204 }
aabc95dc
RV
4205
4206 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4207 !memcmp(intel_dp->sink_crc.last_crc, crc,
4208 6 * sizeof(u8)));
4209
4210 } while (--attempts && (count == 0 || old_equal_new));
621d4c76
RV
4211
4212 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4213 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
ad9dc91b
RV
4214
4215 if (attempts == 0) {
aabc95dc
RV
4216 if (old_equal_new) {
4217 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4218 } else {
4219 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4220 ret = -ETIMEDOUT;
4221 goto stop;
4222 }
ad9dc91b 4223 }
d2e216d0 4224
afe0d67e 4225stop:
082dcc7c 4226 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4227 return ret;
d2e216d0
RV
4228}
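/*
 * Sink CRC recap: CRC generation is started (with IPS disabled around
 * it), then up to six vblanks are spent polling DP_TEST_SINK_MISC and
 * DP_TEST_CRC_R_CR until the test counter advances or the CRC changes.
 * A CRC identical to the previous read is reported as unreliable, and a
 * counter that never moves returns -ETIMEDOUT.
 */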
4229
a60f0e38
JB
4230static bool
4231intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4232{
9d1a1031
JN
4233 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4234 DP_DEVICE_SERVICE_IRQ_VECTOR,
4235 sink_irq_vector, 1) == 1;
a60f0e38
JB
4236}
4237
0e32b39c
DA
4238static bool
4239intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4240{
4241 int ret;
4242
4243 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4244 DP_SINK_COUNT_ESI,
4245 sink_irq_vector, 14);
4246 if (ret != 14)
4247 return false;
4248
4249 return true;
4250}
4251
c5d5ab7a
TP
4252static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4253{
4254 uint8_t test_result = DP_TEST_ACK;
4255 return test_result;
4256}
4257
4258static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4259{
4260 uint8_t test_result = DP_TEST_NAK;
4261 return test_result;
4262}
4263
4264static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4265{
c5d5ab7a 4266 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4267 struct intel_connector *intel_connector = intel_dp->attached_connector;
4268 struct drm_connector *connector = &intel_connector->base;
4269
4270 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4271 connector->edid_corrupt ||
559be30c
TP
4272 intel_dp->aux.i2c_defer_count > 6) {
4273 /* Check EDID read for NACKs, DEFERs and corruption
4274 * (DP CTS 1.2 Core r1.1)
4275 * 4.2.2.4 : Failed EDID read, I2C_NAK
4276 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4277 * 4.2.2.6 : EDID corruption detected
4278 * Use failsafe mode for all cases
4279 */
4280 if (intel_dp->aux.i2c_nack_count > 0 ||
4281 intel_dp->aux.i2c_defer_count > 0)
4282 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4283 intel_dp->aux.i2c_nack_count,
4284 intel_dp->aux.i2c_defer_count);
4285 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4286 } else {
f79b468e
TS
4287 struct edid *block = intel_connector->detect_edid;
4288
4289 /* We have to write the checksum
4290 * of the last block read
4291 */
4292 block += intel_connector->detect_edid->extensions;
4293
559be30c
TP
4294 if (!drm_dp_dpcd_write(&intel_dp->aux,
4295 DP_TEST_EDID_CHECKSUM,
f79b468e 4296 &block->checksum,
5a1cc655 4297 1))
559be30c
TP
4298 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4299
4300 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4301 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4302 }
4303
4304 /* Set test active flag here so userspace doesn't interrupt things */
4305 intel_dp->compliance_test_active = 1;
4306
c5d5ab7a
TP
4307 return test_result;
4308}
4309
4310static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4311{
c5d5ab7a
TP
4312 uint8_t test_result = DP_TEST_NAK;
4313 return test_result;
4314}
4315
4316static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4317{
4318 uint8_t response = DP_TEST_NAK;
4319 uint8_t rxdata = 0;
4320 int status = 0;
4321
559be30c 4322 intel_dp->compliance_test_active = 0;
c5d5ab7a 4323 intel_dp->compliance_test_type = 0;
559be30c
TP
4324 intel_dp->compliance_test_data = 0;
4325
c5d5ab7a
TP
4326 intel_dp->aux.i2c_nack_count = 0;
4327 intel_dp->aux.i2c_defer_count = 0;
4328
4329 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4330 if (status <= 0) {
4331 DRM_DEBUG_KMS("Could not read test request from sink\n");
4332 goto update_status;
4333 }
4334
4335 switch (rxdata) {
4336 case DP_TEST_LINK_TRAINING:
4337 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4338 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4339 response = intel_dp_autotest_link_training(intel_dp);
4340 break;
4341 case DP_TEST_LINK_VIDEO_PATTERN:
4342 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4343 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4344 response = intel_dp_autotest_video_pattern(intel_dp);
4345 break;
4346 case DP_TEST_LINK_EDID_READ:
4347 DRM_DEBUG_KMS("EDID test requested\n");
4348 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4349 response = intel_dp_autotest_edid(intel_dp);
4350 break;
4351 case DP_TEST_LINK_PHY_TEST_PATTERN:
4352 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4353 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4354 response = intel_dp_autotest_phy_pattern(intel_dp);
4355 break;
4356 default:
4357 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4358 break;
4359 }
4360
4361update_status:
4362 status = drm_dp_dpcd_write(&intel_dp->aux,
4363 DP_TEST_RESPONSE,
4364 &response, 1);
4365 if (status <= 0)
4366 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4367}
4368
0e32b39c
DA
4369static int
4370intel_dp_check_mst_status(struct intel_dp *intel_dp)
4371{
4372 bool bret;
4373
4374 if (intel_dp->is_mst) {
4375 u8 esi[16] = { 0 };
4376 int ret = 0;
4377 int retry;
4378 bool handled;
4379 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4380go_again:
4381 if (bret == true) {
4382
4383 /* check link status - esi[10] = 0x200c */
90a6b7b0 4384 if (intel_dp->active_mst_links &&
901c2daf 4385 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4386 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4387 intel_dp_start_link_train(intel_dp);
4388 intel_dp_complete_link_train(intel_dp);
4389 intel_dp_stop_link_train(intel_dp);
4390 }
4391
6f34cc39 4392 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4393 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4394
4395 if (handled) {
4396 for (retry = 0; retry < 3; retry++) {
4397 int wret;
4398 wret = drm_dp_dpcd_write(&intel_dp->aux,
4399 DP_SINK_COUNT_ESI+1,
4400 &esi[1], 3);
4401 if (wret == 3) {
4402 break;
4403 }
4404 }
4405
4406 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4407 if (bret == true) {
6f34cc39 4408 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4409 goto go_again;
4410 }
4411 } else
4412 ret = 0;
4413
4414 return ret;
4415 } else {
4416 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4417 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4418 intel_dp->is_mst = false;
4419 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4420 /* send a hotplug event */
4421 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4422 }
4423 }
4424 return -EINVAL;
4425}
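/*
 * ESI handling recap: while in MST mode the sink's ESI block is read,
 * channel equalization is re-checked (retraining if an active MST link
 * dropped), and drm_dp_mst_hpd_irq() processes the event. Handled
 * events are acked by writing back DP_SINK_COUNT_ESI+1, with up to
 * three retries, before looping for further ESIs; a failed ESI read
 * tears down MST and sends a hotplug event instead.
 */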
4426
a4fc5ed6
KP
4427/*
4428 * According to DP spec
4429 * 5.1.2:
4430 * 1. Read DPCD
4431 * 2. Configure link according to Receiver Capabilities
4432 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4433 * 4. Check link status on receipt of hot-plug interrupt
4434 */
a5146200 4435static void
ea5b213a 4436intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4437{
5b215bcf 4438 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4439 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4440 u8 sink_irq_vector;
93f62dad 4441 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4442
5b215bcf
DA
4443 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4444
e02f9a06 4445 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4446 return;
4447
1a125d8a
ID
4448 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4449 return;
4450
92fd8fd1 4451 /* Try to read receiver status if the link appears to be up */
93f62dad 4452 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4453 return;
4454 }
4455
92fd8fd1 4456 /* Now read the DPCD to see if it's actually running */
26d61aad 4457 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4458 return;
4459 }
4460
a60f0e38
JB
4461 /* Try to read the source of the interrupt */
4462 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4463 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4464 /* Clear interrupt source */
9d1a1031
JN
4465 drm_dp_dpcd_writeb(&intel_dp->aux,
4466 DP_DEVICE_SERVICE_IRQ_VECTOR,
4467 sink_irq_vector);
a60f0e38
JB
4468
4469 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4470 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4471 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4472 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4473 }
4474
901c2daf 4475 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4476 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4477 intel_encoder->base.name);
33a34e4e
JB
4478 intel_dp_start_link_train(intel_dp);
4479 intel_dp_complete_link_train(intel_dp);
3ab9c637 4480 intel_dp_stop_link_train(intel_dp);
33a34e4e 4481 }
a4fc5ed6 4482}
a4fc5ed6 4483
caf9ab24 4484/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4485static enum drm_connector_status
26d61aad 4486intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4487{
caf9ab24 4488 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4489 uint8_t type;
4490
4491 if (!intel_dp_get_dpcd(intel_dp))
4492 return connector_status_disconnected;
4493
4494 /* if there's no downstream port, we're done */
4495 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4496 return connector_status_connected;
caf9ab24
AJ
4497
4498 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4499 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4500 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4501 uint8_t reg;
9d1a1031
JN
4502
4503 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4504 &reg, 1) < 0)
caf9ab24 4505 return connector_status_unknown;
9d1a1031 4506
23235177
AJ
4507 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4508 : connector_status_disconnected;
caf9ab24
AJ
4509 }
4510
4511 /* If no HPD, poke DDC gently */
0b99836f 4512 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4513 return connector_status_connected;
caf9ab24
AJ
4514
4515 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4516 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4517 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4518 if (type == DP_DS_PORT_TYPE_VGA ||
4519 type == DP_DS_PORT_TYPE_NON_EDID)
4520 return connector_status_unknown;
4521 } else {
4522 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4523 DP_DWN_STRM_PORT_TYPE_MASK;
4524 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4525 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4526 return connector_status_unknown;
4527 }
caf9ab24
AJ
4528
4529 /* Anything else is out of spec, warn and ignore */
4530 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4531 return connector_status_disconnected;
71ba9000
AJ
4532}
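/*
 * Detection recap: no downstream port means the sink itself answered
 * and is connected; an HPD-capable downstream port (DPCD 1.1+) is
 * judged by DP_SINK_COUNT; otherwise a gentle DDC probe is tried, and
 * only VGA/NON_EDID style port types are left as
 * connector_status_unknown, anything else being treated as a broken
 * branch device.
 */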
4533
d410b56d
CW
4534static enum drm_connector_status
4535edp_detect(struct intel_dp *intel_dp)
4536{
4537 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4538 enum drm_connector_status status;
4539
4540 status = intel_panel_detect(dev);
4541 if (status == connector_status_unknown)
4542 status = connector_status_connected;
4543
4544 return status;
4545}
4546
b93433cc
JN
4547static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4548 struct intel_digital_port *port)
5eb08b69 4549{
b93433cc 4550 u32 bit;
01cb9ea6 4551
0df53b77
JN
4552 switch (port->port) {
4553 case PORT_A:
4554 return true;
4555 case PORT_B:
4556 bit = SDE_PORTB_HOTPLUG;
4557 break;
4558 case PORT_C:
4559 bit = SDE_PORTC_HOTPLUG;
4560 break;
4561 case PORT_D:
4562 bit = SDE_PORTD_HOTPLUG;
4563 break;
4564 default:
4565 MISSING_CASE(port->port);
4566 return false;
4567 }
4568
4569 return I915_READ(SDEISR) & bit;
4570}
4571
4572static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4573 struct intel_digital_port *port)
4574{
4575 u32 bit;
4576
4577 switch (port->port) {
4578 case PORT_A:
4579 return true;
4580 case PORT_B:
4581 bit = SDE_PORTB_HOTPLUG_CPT;
4582 break;
4583 case PORT_C:
4584 bit = SDE_PORTC_HOTPLUG_CPT;
4585 break;
4586 case PORT_D:
4587 bit = SDE_PORTD_HOTPLUG_CPT;
4588 break;
4589 default:
4590 MISSING_CASE(port->port);
4591 return false;
b93433cc 4592 }
1b469639 4593
b93433cc 4594 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4595}
4596
7e66bcf2 4597static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4598 struct intel_digital_port *port)
a4fc5ed6 4599{
9642c81c 4600 u32 bit;
5eb08b69 4601
9642c81c
JN
4602 switch (port->port) {
4603 case PORT_B:
4604 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4605 break;
4606 case PORT_C:
4607 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4608 break;
4609 case PORT_D:
4610 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4611 break;
4612 default:
4613 MISSING_CASE(port->port);
4614 return false;
4615 }
4616
4617 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4618}
4619
4620static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4621 struct intel_digital_port *port)
4622{
4623 u32 bit;
4624
4625 switch (port->port) {
4626 case PORT_B:
4627 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4628 break;
4629 case PORT_C:
4630 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4631 break;
4632 case PORT_D:
4633 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4634 break;
4635 default:
4636 MISSING_CASE(port->port);
4637 return false;
a4fc5ed6
KP
4638 }
4639
1d245987 4640 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4641}
4642
e464bfde
JN
4643static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4644 struct intel_digital_port *port)
4645{
4646 u32 bit;
4647
4648 switch (port->port) {
4649 case PORT_A:
4650 bit = BXT_DE_PORT_HP_DDIA;
4651 break;
4652 case PORT_B:
4653 bit = BXT_DE_PORT_HP_DDIB;
4654 break;
4655 case PORT_C:
4656 bit = BXT_DE_PORT_HP_DDIC;
4657 break;
4658 default:
4659 MISSING_CASE(port->port);
4660 return false;
4661 }
4662
4663 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4664}
4665
7e66bcf2
JN
4666/*
4667 * intel_digital_port_connected - is the specified port connected?
4668 * @dev_priv: i915 private structure
4669 * @port: the port to test
4670 *
4671 * Return %true if @port is connected, %false otherwise.
4672 */
4673static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4674 struct intel_digital_port *port)
4675{
0df53b77 4676 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4677 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4678 if (HAS_PCH_SPLIT(dev_priv))
4679 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4680 else if (IS_BROXTON(dev_priv))
4681 return bxt_digital_port_connected(dev_priv, port);
9642c81c
JN
4682 else if (IS_VALLEYVIEW(dev_priv))
4683 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4684 else
4685 return g4x_digital_port_connected(dev_priv, port);
4686}
4687
b93433cc
JN
4688static enum drm_connector_status
4689ironlake_dp_detect(struct intel_dp *intel_dp)
4690{
4691 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4692 struct drm_i915_private *dev_priv = dev->dev_private;
4693 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4694
7e66bcf2 4695 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4696 return connector_status_disconnected;
4697
4698 return intel_dp_detect_dpcd(intel_dp);
4699}
4700
2a592bec
DA
4701static enum drm_connector_status
4702g4x_dp_detect(struct intel_dp *intel_dp)
4703{
4704 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec
DA
4706
4707 /* Can't disconnect eDP, but you can close the lid... */
4708 if (is_edp(intel_dp)) {
4709 enum drm_connector_status status;
4710
4711 status = intel_panel_detect(dev);
4712 if (status == connector_status_unknown)
4713 status = connector_status_connected;
4714 return status;
4715 }
4716
7e66bcf2 4717 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4718 return connector_status_disconnected;
4719
26d61aad 4720 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4721}
4722
8c241fef 4723static struct edid *
beb60608 4724intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4725{
beb60608 4726 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4727
9cd300e0
JN
4728 /* use cached edid if we have one */
4729 if (intel_connector->edid) {
9cd300e0
JN
4730 /* invalid edid */
4731 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4732 return NULL;
4733
55e9edeb 4734 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4735 } else
4736 return drm_get_edid(&intel_connector->base,
4737 &intel_dp->aux.ddc);
4738}
8c241fef 4739
beb60608
CW
4740static void
4741intel_dp_set_edid(struct intel_dp *intel_dp)
4742{
4743 struct intel_connector *intel_connector = intel_dp->attached_connector;
4744 struct edid *edid;
8c241fef 4745
beb60608
CW
4746 edid = intel_dp_get_edid(intel_dp);
4747 intel_connector->detect_edid = edid;
4748
4749 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4750 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4751 else
4752 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4753}
4754
beb60608
CW
4755static void
4756intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4757{
beb60608 4758 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4759
beb60608
CW
4760 kfree(intel_connector->detect_edid);
4761 intel_connector->detect_edid = NULL;
9cd300e0 4762
beb60608
CW
4763 intel_dp->has_audio = false;
4764}
d6f24d0f 4765
beb60608
CW
4766static enum intel_display_power_domain
4767intel_dp_power_get(struct intel_dp *dp)
4768{
4769 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4770 enum intel_display_power_domain power_domain;
4771
4772 power_domain = intel_display_port_power_domain(encoder);
4773 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4774
4775 return power_domain;
4776}
d6f24d0f 4777
beb60608
CW
4778static void
4779intel_dp_power_put(struct intel_dp *dp,
4780 enum intel_display_power_domain power_domain)
4781{
4782 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4783 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4784}
4785
a9756bb5
ZW
4786static enum drm_connector_status
4787intel_dp_detect(struct drm_connector *connector, bool force)
4788{
4789 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4790 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4791 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4792 struct drm_device *dev = connector->dev;
a9756bb5 4793 enum drm_connector_status status;
671dedd2 4794 enum intel_display_power_domain power_domain;
0e32b39c 4795 bool ret;
09b1eb13 4796 u8 sink_irq_vector;
a9756bb5 4797
164c8598 4798 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4799 connector->base.id, connector->name);
beb60608 4800 intel_dp_unset_edid(intel_dp);
164c8598 4801
0e32b39c
DA
4802 if (intel_dp->is_mst) {
4803 /* MST devices are disconnected from a monitor POV */
4804 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4805 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4806 return connector_status_disconnected;
0e32b39c
DA
4807 }
4808
beb60608 4809 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4810
d410b56d
CW
4811 /* Can't disconnect eDP, but you can close the lid... */
4812 if (is_edp(intel_dp))
4813 status = edp_detect(intel_dp);
4814 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4815 status = ironlake_dp_detect(intel_dp);
4816 else
4817 status = g4x_dp_detect(intel_dp);
4818 if (status != connector_status_connected)
c8c8fb33 4819 goto out;
a9756bb5 4820
0d198328
AJ
4821 intel_dp_probe_oui(intel_dp);
4822
0e32b39c
DA
4823 ret = intel_dp_probe_mst(intel_dp);
4824 if (ret) {
4825 /* if we are in MST mode then this connector
4826 won't appear connected or have anything with EDID on it */
4827 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4828 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4829 status = connector_status_disconnected;
4830 goto out;
4831 }
4832
beb60608 4833 intel_dp_set_edid(intel_dp);
a9756bb5 4834
d63885da
PZ
4835 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4836 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4837 status = connector_status_connected;
4838
09b1eb13
TP
4839 /* Try to read the source of the interrupt */
4840 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4841 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4842 /* Clear interrupt source */
4843 drm_dp_dpcd_writeb(&intel_dp->aux,
4844 DP_DEVICE_SERVICE_IRQ_VECTOR,
4845 sink_irq_vector);
4846
4847 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4848 intel_dp_handle_test_request(intel_dp);
4849 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4850 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4851 }
4852
c8c8fb33 4853out:
beb60608 4854 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4855 return status;
a4fc5ed6
KP
4856}
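
A minimal userspace sketch of how this detect path is normally exercised, assuming only libdrm (this is illustration, not part of intel_dp.c): asking the kernel for the connector forces a probe, which lands in intel_dp_detect() above, and the computed status comes back in the connection field.

/* Hypothetical libdrm client, shown for illustration only. */
#include <stdio.h>
#include <xf86drmMode.h>

static void report_connector_status(int fd, uint32_t connector_id)
{
	drmModeConnector *conn = drmModeGetConnector(fd, connector_id);

	if (!conn)
		return;

	printf("connector %u: %s\n", connector_id,
	       conn->connection == DRM_MODE_CONNECTED ? "connected" :
	       conn->connection == DRM_MODE_DISCONNECTED ? "disconnected" :
	       "unknown");

	drmModeFreeConnector(conn);
}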
4857
beb60608
CW
4858static void
4859intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4860{
df0e9248 4861 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4862 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4863 enum intel_display_power_domain power_domain;
a4fc5ed6 4864
beb60608
CW
4865 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4866 connector->base.id, connector->name);
4867 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4868
beb60608
CW
4869 if (connector->status != connector_status_connected)
4870 return;
671dedd2 4871
beb60608
CW
4872 power_domain = intel_dp_power_get(intel_dp);
4873
4874 intel_dp_set_edid(intel_dp);
4875
4876 intel_dp_power_put(intel_dp, power_domain);
4877
4878 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4879 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4880}
4881
4882static int intel_dp_get_modes(struct drm_connector *connector)
4883{
4884 struct intel_connector *intel_connector = to_intel_connector(connector);
4885 struct edid *edid;
4886
4887 edid = intel_connector->detect_edid;
4888 if (edid) {
4889 int ret = intel_connector_update_modes(connector, edid);
4890 if (ret)
4891 return ret;
4892 }
32f9d658 4893
f8779fda 4894 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4895 if (is_edp(intel_attached_dp(connector)) &&
4896 intel_connector->panel.fixed_mode) {
f8779fda 4897 struct drm_display_mode *mode;
beb60608
CW
4898
4899 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4900 intel_connector->panel.fixed_mode);
f8779fda 4901 if (mode) {
32f9d658
ZW
4902 drm_mode_probed_add(connector, mode);
4903 return 1;
4904 }
4905 }
beb60608 4906
32f9d658 4907 return 0;
a4fc5ed6
KP
4908}
4909
1aad7ac0
CW
4910static bool
4911intel_dp_detect_audio(struct drm_connector *connector)
4912{
1aad7ac0 4913 bool has_audio = false;
beb60608 4914 struct edid *edid;
1aad7ac0 4915
beb60608
CW
4916 edid = to_intel_connector(connector)->detect_edid;
4917 if (edid)
1aad7ac0 4918 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4919
1aad7ac0
CW
4920 return has_audio;
4921}
4922
f684960e
CW
4923static int
4924intel_dp_set_property(struct drm_connector *connector,
4925 struct drm_property *property,
4926 uint64_t val)
4927{
e953fd7b 4928 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4929 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4930 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4931 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4932 int ret;
4933
662595df 4934 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4935 if (ret)
4936 return ret;
4937
3f43c48d 4938 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4939 int i = val;
4940 bool has_audio;
4941
4942 if (i == intel_dp->force_audio)
f684960e
CW
4943 return 0;
4944
1aad7ac0 4945 intel_dp->force_audio = i;
f684960e 4946
c3e5f67b 4947 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4948 has_audio = intel_dp_detect_audio(connector);
4949 else
c3e5f67b 4950 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4951
4952 if (has_audio == intel_dp->has_audio)
f684960e
CW
4953 return 0;
4954
1aad7ac0 4955 intel_dp->has_audio = has_audio;
f684960e
CW
4956 goto done;
4957 }
4958
e953fd7b 4959 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4960 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4961 bool old_range = intel_dp->limited_color_range;
ae4edb80 4962
55bc60db
VS
4963 switch (val) {
4964 case INTEL_BROADCAST_RGB_AUTO:
4965 intel_dp->color_range_auto = true;
4966 break;
4967 case INTEL_BROADCAST_RGB_FULL:
4968 intel_dp->color_range_auto = false;
0f2a2a75 4969 intel_dp->limited_color_range = false;
55bc60db
VS
4970 break;
4971 case INTEL_BROADCAST_RGB_LIMITED:
4972 intel_dp->color_range_auto = false;
0f2a2a75 4973 intel_dp->limited_color_range = true;
55bc60db
VS
4974 break;
4975 default:
4976 return -EINVAL;
4977 }
ae4edb80
DV
4978
4979 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4980 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4981 return 0;
4982
e953fd7b
CW
4983 goto done;
4984 }
4985
53b41837
YN
4986 if (is_edp(intel_dp) &&
4987 property == connector->dev->mode_config.scaling_mode_property) {
4988 if (val == DRM_MODE_SCALE_NONE) {
4989 DRM_DEBUG_KMS("no scaling not supported\n");
4990 return -EINVAL;
4991 }
4992
4993 if (intel_connector->panel.fitting_mode == val) {
4994 /* the eDP scaling property is not changed */
4995 return 0;
4996 }
4997 intel_connector->panel.fitting_mode = val;
4998
4999 goto done;
5000 }
5001
f684960e
CW
5002 return -EINVAL;
5003
5004done:
c0c36b94
CW
5005 if (intel_encoder->base.crtc)
5006 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
5007
5008 return 0;
5009}
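
For illustration, a hedged userspace sketch of driving this handler: the connector property is looked up by name and set with drmModeObjectSetProperty(), which the DRM core routes into intel_dp_set_property() above. The name "Broadcast RGB" is the one created by the attach helper used later in this file; the value is assumed to be one of the enum indexes that property exposes (automatic/full/limited). Not part of intel_dp.c.

/* Hypothetical libdrm client, error handling trimmed. */
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_broadcast_rgb(int fd, uint32_t connector_id, uint64_t value)
{
	drmModeObjectProperties *props;
	int ret = -1;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return ret;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "Broadcast RGB"))
			ret = drmModeObjectSetProperty(fd, connector_id,
						       DRM_MODE_OBJECT_CONNECTOR,
						       prop->prop_id, value);
		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
	return ret;
}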
5010
a4fc5ed6 5011static void
73845adf 5012intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 5013{
1d508706 5014 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 5015
10e972d3 5016 kfree(intel_connector->detect_edid);
beb60608 5017
9cd300e0
JN
5018 if (!IS_ERR_OR_NULL(intel_connector->edid))
5019 kfree(intel_connector->edid);
5020
acd8db10
PZ
5021 /* Can't call is_edp() since the encoder may have been destroyed
5022 * already. */
5023 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 5024 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 5025
a4fc5ed6 5026 drm_connector_cleanup(connector);
55f78c43 5027 kfree(connector);
a4fc5ed6
KP
5028}
5029
00c09d70 5030void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 5031{
da63a9f2
PZ
5032 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5033 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 5034
4f71d0cb 5035 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 5036 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
5037 if (is_edp(intel_dp)) {
5038 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5039 /*
5040 * vdd might still be enabled due to the delayed vdd off.
5041 * Make sure vdd is actually turned off here.
5042 */
773538e8 5043 pps_lock(intel_dp);
4be73780 5044 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
5045 pps_unlock(intel_dp);
5046
01527b31
CT
5047 if (intel_dp->edp_notifier.notifier_call) {
5048 unregister_reboot_notifier(&intel_dp->edp_notifier);
5049 intel_dp->edp_notifier.notifier_call = NULL;
5050 }
bd943159 5051 }
c8bd0e49 5052 drm_encoder_cleanup(encoder);
da63a9f2 5053 kfree(intel_dig_port);
24d05927
DV
5054}
5055
07f9cd0b
ID
5056static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5057{
5058 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5059
5060 if (!is_edp(intel_dp))
5061 return;
5062
951468f3
VS
5063 /*
5064 * vdd might still be enabled due to the delayed vdd off.
5065 * Make sure vdd is actually turned off here.
5066 */
afa4e53a 5067 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 5068 pps_lock(intel_dp);
07f9cd0b 5069 edp_panel_vdd_off_sync(intel_dp);
773538e8 5070 pps_unlock(intel_dp);
07f9cd0b
ID
5071}
5072
49e6bc51
VS
5073static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5074{
5075 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5076 struct drm_device *dev = intel_dig_port->base.base.dev;
5077 struct drm_i915_private *dev_priv = dev->dev_private;
5078 enum intel_display_power_domain power_domain;
5079
5080 lockdep_assert_held(&dev_priv->pps_mutex);
5081
5082 if (!edp_have_panel_vdd(intel_dp))
5083 return;
5084
5085 /*
5086 * The VDD bit needs a power domain reference, so if the bit is
5087 * already enabled when we boot or resume, grab this reference and
5088 * schedule a vdd off, so we don't hold on to the reference
5089 * indefinitely.
5090 */
5091 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5092 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5093 intel_display_power_get(dev_priv, power_domain);
5094
5095 edp_panel_vdd_schedule_off(intel_dp);
5096}
5097
6d93c0c4
ID
5098static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5099{
49e6bc51
VS
5100 struct intel_dp *intel_dp;
5101
5102 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5103 return;
5104
5105 intel_dp = enc_to_intel_dp(encoder);
5106
5107 pps_lock(intel_dp);
5108
5109 /*
5110 * Read out the current power sequencer assignment,
5111 * in case the BIOS did something with it.
5112 */
5113 if (IS_VALLEYVIEW(encoder->dev))
5114 vlv_initial_power_sequencer_setup(intel_dp);
5115
5116 intel_edp_panel_vdd_sanitize(intel_dp);
5117
5118 pps_unlock(intel_dp);
6d93c0c4
ID
5119}
5120
a4fc5ed6 5121static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 5122 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 5123 .detect = intel_dp_detect,
beb60608 5124 .force = intel_dp_force,
a4fc5ed6 5125 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 5126 .set_property = intel_dp_set_property,
2545e4a6 5127 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 5128 .destroy = intel_dp_connector_destroy,
c6f95f27 5129 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 5130 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
5131};
5132
5133static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5134 .get_modes = intel_dp_get_modes,
5135 .mode_valid = intel_dp_mode_valid,
df0e9248 5136 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
5137};
5138
a4fc5ed6 5139static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 5140 .reset = intel_dp_encoder_reset,
24d05927 5141 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
5142};
5143
b2c5c181 5144enum irqreturn
13cf5504
DA
5145intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5146{
5147 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 5148 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
5149 struct drm_device *dev = intel_dig_port->base.base.dev;
5150 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 5151 enum intel_display_power_domain power_domain;
b2c5c181 5152 enum irqreturn ret = IRQ_NONE;
1c767b33 5153
0e32b39c
DA
5154 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5155 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5156
7a7f84cc
VS
5157 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5158 /*
5159 * vdd off can generate a long pulse on eDP which
5160 * would require vdd on to handle it, and thus we
5161 * would end up in an endless cycle of
5162 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5163 */
5164 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5165 port_name(intel_dig_port->port));
a8b3d52f 5166 return IRQ_HANDLED;
7a7f84cc
VS
5167 }
5168
26fbb774
VS
5169 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5170 port_name(intel_dig_port->port),
0e32b39c 5171 long_hpd ? "long" : "short");
13cf5504 5172
1c767b33
ID
5173 power_domain = intel_display_port_power_domain(intel_encoder);
5174 intel_display_power_get(dev_priv, power_domain);
5175
0e32b39c 5176 if (long_hpd) {
5fa836a9
MK
5177 /* indicate that we need to restart link training */
5178 intel_dp->train_set_valid = false;
2a592bec 5179
7e66bcf2
JN
5180 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5181 goto mst_fail;
0e32b39c
DA
5182
5183 if (!intel_dp_get_dpcd(intel_dp)) {
5184 goto mst_fail;
5185 }
5186
5187 intel_dp_probe_oui(intel_dp);
5188
5189 if (!intel_dp_probe_mst(intel_dp))
5190 goto mst_fail;
5191
5192 } else {
5193 if (intel_dp->is_mst) {
1c767b33 5194 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5195 goto mst_fail;
5196 }
5197
5198 if (!intel_dp->is_mst) {
5199 /*
5200 * we'll check the link status via the normal hot plug path later -
5201 * but for short hpds we should check it now
5202 */
5b215bcf 5203 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5204 intel_dp_check_link_status(intel_dp);
5b215bcf 5205 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5206 }
5207 }
b2c5c181
DV
5208
5209 ret = IRQ_HANDLED;
5210
1c767b33 5211 goto put_power;
0e32b39c
DA
5212mst_fail:
5213 /* if we were in MST mode and the device is no longer there, get out of MST mode */
5214 if (intel_dp->is_mst) {
5215 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5216 intel_dp->is_mst = false;
5217 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5218 }
1c767b33
ID
5219put_power:
5220 intel_display_power_put(dev_priv, power_domain);
5221
5222 return ret;
13cf5504
DA
5223}
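
A simplified sketch of how this hook is reached, assuming only what intel_dp_init() below sets up (the ->hpd_pulse assignment and the hotplug.irq_port[] table); the real dispatcher lives in the hotplug IRQ code outside this file.

/* Illustrative dispatch only; not the actual i915 hotplug bottom half. */
static void example_dispatch_digital_port_hpd(struct drm_i915_private *dev_priv,
					      enum port port, bool long_hpd)
{
	struct intel_digital_port *dig_port = dev_priv->hotplug.irq_port[port];

	/* ports without a registered DP/DDI encoder have no hook to call */
	if (dig_port && dig_port->hpd_pulse)
		dig_port->hpd_pulse(dig_port, long_hpd);
}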
5224
e3421a18
ZW
5225/* Return which DP Port should be selected for Transcoder DP control */
5226int
0206e353 5227intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5228{
5229 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5230 struct intel_encoder *intel_encoder;
5231 struct intel_dp *intel_dp;
e3421a18 5232
fa90ecef
PZ
5233 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5234 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5235
fa90ecef
PZ
5236 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5237 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5238 return intel_dp->output_reg;
e3421a18 5239 }
ea5b213a 5240
e3421a18
ZW
5241 return -1;
5242}
5243
36e83a18 5244/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5245bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5246{
5247 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5248 union child_device_config *p_child;
36e83a18 5249 int i;
5d8a7752
VS
5250 static const short port_mapping[] = {
5251 [PORT_B] = PORT_IDPB,
5252 [PORT_C] = PORT_IDPC,
5253 [PORT_D] = PORT_IDPD,
5254 };
36e83a18 5255
3b32a35b
VS
5256 if (port == PORT_A)
5257 return true;
5258
41aa3448 5259 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5260 return false;
5261
41aa3448
RV
5262 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5263 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5264
5d8a7752 5265 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5266 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5267 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5268 return true;
5269 }
5270 return false;
5271}
5272
0e32b39c 5273void
f684960e
CW
5274intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5275{
53b41837
YN
5276 struct intel_connector *intel_connector = to_intel_connector(connector);
5277
3f43c48d 5278 intel_attach_force_audio_property(connector);
e953fd7b 5279 intel_attach_broadcast_rgb_property(connector);
55bc60db 5280 intel_dp->color_range_auto = true;
53b41837
YN
5281
5282 if (is_edp(intel_dp)) {
5283 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5284 drm_object_attach_property(
5285 &connector->base,
53b41837 5286 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5287 DRM_MODE_SCALE_ASPECT);
5288 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5289 }
f684960e
CW
5290}
5291
dada1a9f
ID
5292static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5293{
5294 intel_dp->last_power_cycle = jiffies;
5295 intel_dp->last_power_on = jiffies;
5296 intel_dp->last_backlight_off = jiffies;
5297}
5298
67a54566
DV
5299static void
5300intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5301 struct intel_dp *intel_dp)
67a54566
DV
5302{
5303 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5304 struct edp_power_seq cur, vbt, spec,
5305 *final = &intel_dp->pps_delays;
b0a08bec
VK
5306 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5307 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5308
e39b999a
VS
5309 lockdep_assert_held(&dev_priv->pps_mutex);
5310
81ddbc69
VS
5311 /* already initialized? */
5312 if (final->t11_t12 != 0)
5313 return;
5314
b0a08bec
VK
5315 if (IS_BROXTON(dev)) {
5316 /*
5317 * TODO: BXT has 2 sets of PPS registers.
5318 * The correct register for Broxton needs to be identified
5319 * using the VBT; hardcoding for now.
5320 */
5321 pp_ctrl_reg = BXT_PP_CONTROL(0);
5322 pp_on_reg = BXT_PP_ON_DELAYS(0);
5323 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5324 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5325 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5326 pp_on_reg = PCH_PP_ON_DELAYS;
5327 pp_off_reg = PCH_PP_OFF_DELAYS;
5328 pp_div_reg = PCH_PP_DIVISOR;
5329 } else {
bf13e81b
JN
5330 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5331
5332 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5333 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5334 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5335 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5336 }
67a54566
DV
5337
5338 /* Workaround: Need to write PP_CONTROL with the unlock key as
5339 * the very first thing. */
b0a08bec 5340 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5341
453c5420
JB
5342 pp_on = I915_READ(pp_on_reg);
5343 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5344 if (!IS_BROXTON(dev)) {
5345 I915_WRITE(pp_ctrl_reg, pp_ctl);
5346 pp_div = I915_READ(pp_div_reg);
5347 }
67a54566
DV
5348
5349 /* Pull timing values out of registers */
5350 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5351 PANEL_POWER_UP_DELAY_SHIFT;
5352
5353 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5354 PANEL_LIGHT_ON_DELAY_SHIFT;
5355
5356 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5357 PANEL_LIGHT_OFF_DELAY_SHIFT;
5358
5359 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5360 PANEL_POWER_DOWN_DELAY_SHIFT;
5361
b0a08bec
VK
5362 if (IS_BROXTON(dev)) {
5363 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5364 BXT_POWER_CYCLE_DELAY_SHIFT;
5365 if (tmp > 0)
5366 cur.t11_t12 = (tmp - 1) * 1000;
5367 else
5368 cur.t11_t12 = 0;
5369 } else {
5370 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5371 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5372 }
67a54566
DV
5373
5374 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5375 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5376
41aa3448 5377 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5378
5379 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5380 * our hw here, which are all in 100usec. */
5381 spec.t1_t3 = 210 * 10;
5382 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5383 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5384 spec.t10 = 500 * 10;
5385 /* This one is special and actually in units of 100ms, but zero
5386 * based in the hw (so we need to add 100 ms). But the sw vbt
5387 * table multiplies it with 1000 to make it in units of 100usec,
5388 * too. */
5389 spec.t11_t12 = (510 + 100) * 10;
5390
5391 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5392 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5393
5394 /* Use the max of the register settings and vbt. If both are
5395 * unset, fall back to the spec limits. */
36b5f425 5396#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5397 spec.field : \
5398 max(cur.field, vbt.field))
5399 assign_final(t1_t3);
5400 assign_final(t8);
5401 assign_final(t9);
5402 assign_final(t10);
5403 assign_final(t11_t12);
5404#undef assign_final
5405
36b5f425 5406#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5407 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5408 intel_dp->backlight_on_delay = get_delay(t8);
5409 intel_dp->backlight_off_delay = get_delay(t9);
5410 intel_dp->panel_power_down_delay = get_delay(t10);
5411 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5412#undef get_delay
5413
f30d26e4
JN
5414 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5415 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5416 intel_dp->panel_power_cycle_delay);
5417
5418 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5419 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5420}
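
A worked sketch of the merge rule applied above, with hypothetical numbers: each delay becomes the per-field max of the current register value and the VBT value, falling back to the eDP spec limit only when both are zero. Standalone illustration, not driver code; all values are in the 100us units used by the registers.

#include <stdio.h>

#define MERGE_DELAY(cur, vbt, spec) \
	(((cur) > (vbt) ? (cur) : (vbt)) ? \
	 ((cur) > (vbt) ? (cur) : (vbt)) : (spec))

int main(void)
{
	/* hypothetical: register reads back 0, VBT asks for 40 ms */
	int t1_t3 = MERGE_DELAY(0, 400, 210 * 10);	/* -> 400 */
	/* both register and VBT unset -> eDP 1.3 spec limit of 500 ms */
	int t10 = MERGE_DELAY(0, 0, 500 * 10);		/* -> 5000 */

	printf("t1_t3=%d t10=%d (100us units)\n", t1_t3, t10);
	return 0;
}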
5421
5422static void
5423intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5424 struct intel_dp *intel_dp)
f30d26e4
JN
5425{
5426 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5427 u32 pp_on, pp_off, pp_div, port_sel = 0;
5428 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5429 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5430 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5431 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5432
e39b999a 5433 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5434
b0a08bec
VK
5435 if (IS_BROXTON(dev)) {
5436 /*
5437 * TODO: BXT has 2 sets of PPS registers.
5438 * The correct register for Broxton needs to be identified
5439 * using the VBT; hardcoding for now.
5440 */
5441 pp_ctrl_reg = BXT_PP_CONTROL(0);
5442 pp_on_reg = BXT_PP_ON_DELAYS(0);
5443 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5444
5445 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5446 pp_on_reg = PCH_PP_ON_DELAYS;
5447 pp_off_reg = PCH_PP_OFF_DELAYS;
5448 pp_div_reg = PCH_PP_DIVISOR;
5449 } else {
bf13e81b
JN
5450 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5451
5452 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5453 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5454 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5455 }
5456
b2f19d1a
PZ
5457 /*
5458 * And finally store the new values in the power sequencer. The
5459 * backlight delays are set to 1 because we do manual waits on them. For
5460 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5461 * we'll end up waiting for the backlight off delay twice: once when we
5462 * do the manual sleep, and once when we disable the panel and wait for
5463 * the PP_STATUS bit to become zero.
5464 */
f30d26e4 5465 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5466 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5467 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5468 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5469 /* Compute the divisor for the pp clock, simply match the Bspec
5470 * formula. */
b0a08bec
VK
5471 if (IS_BROXTON(dev)) {
5472 pp_div = I915_READ(pp_ctrl_reg);
5473 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5474 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5475 << BXT_POWER_CYCLE_DELAY_SHIFT);
5476 } else {
5477 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5478 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5479 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5480 }
67a54566
DV
5481
5482 /* Haswell doesn't have any port selection bits for the panel
5483 * power sequencer any more. */
bc7d38a4 5484 if (IS_VALLEYVIEW(dev)) {
ad933b56 5485 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5486 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5487 if (port == PORT_A)
a24c144c 5488 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5489 else
a24c144c 5490 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5491 }
5492
453c5420
JB
5493 pp_on |= port_sel;
5494
5495 I915_WRITE(pp_on_reg, pp_on);
5496 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5497 if (IS_BROXTON(dev))
5498 I915_WRITE(pp_ctrl_reg, pp_div);
5499 else
5500 I915_WRITE(pp_div_reg, pp_div);
67a54566 5501
67a54566 5502 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5503 I915_READ(pp_on_reg),
5504 I915_READ(pp_off_reg),
b0a08bec
VK
5505 IS_BROXTON(dev) ?
5506 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5507 I915_READ(pp_div_reg));
f684960e
CW
5508}
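
A small worked instance of the divisor packing done above, under the assumption of a rawclk-derived div of 125 and a 600 ms power cycle delay (6000 in the driver's 100us units); standalone arithmetic only, not driver code.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int div = 125;		/* hypothetical rawclk-derived divisor */
	unsigned int t11_t12 = 6000;	/* 600 ms in 100us units */

	unsigned int ref_div = (100 * div) / 2 - 1;		/* 6249 */
	unsigned int cycle = DIV_ROUND_UP(t11_t12, 1000);	/* 6, in 100ms units */

	printf("reference divider %u, power cycle delay field %u\n",
	       ref_div, cycle);
	return 0;
}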
5509
b33a2815
VK
5510/**
5511 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5512 * @dev: DRM device
5513 * @refresh_rate: RR to be programmed
5514 *
5515 * This function gets called when refresh rate (RR) has to be changed from
5516 * one frequency to another. Switches can be between high and low RR
5517 * supported by the panel or to any other RR based on media playback (in
5518 * this case, RR value needs to be passed from user space).
5519 *
5520 * The caller of this function needs to take a lock on dev_priv->drrs.
5521 */
96178eeb 5522static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5523{
5524 struct drm_i915_private *dev_priv = dev->dev_private;
5525 struct intel_encoder *encoder;
96178eeb
VK
5526 struct intel_digital_port *dig_port = NULL;
5527 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5528 struct intel_crtc_state *config = NULL;
439d7ac0 5529 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5530 u32 reg, val;
96178eeb 5531 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5532
5533 if (refresh_rate <= 0) {
5534 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5535 return;
5536 }
5537
96178eeb
VK
5538 if (intel_dp == NULL) {
5539 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5540 return;
5541 }
5542
1fcc9d1c 5543 /*
e4d59f6b
RV
5544 * FIXME: This needs proper synchronization with psr state for some
5545 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5546 */
439d7ac0 5547
96178eeb
VK
5548 dig_port = dp_to_dig_port(intel_dp);
5549 encoder = &dig_port->base;
723f9aab 5550 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5551
5552 if (!intel_crtc) {
5553 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5554 return;
5555 }
5556
6e3c9717 5557 config = intel_crtc->config;
439d7ac0 5558
96178eeb 5559 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5560 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5561 return;
5562 }
5563
96178eeb
VK
5564 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5565 refresh_rate)
439d7ac0
PB
5566 index = DRRS_LOW_RR;
5567
96178eeb 5568 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5569 DRM_DEBUG_KMS(
5570 "DRRS requested for previously set RR...ignoring\n");
5571 return;
5572 }
5573
5574 if (!intel_crtc->active) {
5575 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5576 return;
5577 }
5578
44395bfe 5579 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5580 switch (index) {
5581 case DRRS_HIGH_RR:
5582 intel_dp_set_m_n(intel_crtc, M1_N1);
5583 break;
5584 case DRRS_LOW_RR:
5585 intel_dp_set_m_n(intel_crtc, M2_N2);
5586 break;
5587 case DRRS_MAX_RR:
5588 default:
5589 DRM_ERROR("Unsupported refreshrate type\n");
5590 }
5591 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5592 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5593 val = I915_READ(reg);
a4c30b1d 5594
439d7ac0 5595 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5596 if (IS_VALLEYVIEW(dev))
5597 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5598 else
5599 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5600 } else {
6fa7aec1
VK
5601 if (IS_VALLEYVIEW(dev))
5602 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5603 else
5604 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5605 }
5606 I915_WRITE(reg, val);
5607 }
5608
4e9ac947
VK
5609 dev_priv->drrs.refresh_rate_type = index;
5610
5611 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5612}
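
A minimal sketch of the locking contract stated in the kernel-doc above: dev_priv->drrs.mutex must be held around the call, and drrs.dp is checked under that lock. It mirrors the callers later in this file; the example_* name is hypothetical.

static void example_drrs_downclock(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drrs.mutex);

	/* only switch if DRRS is armed and we are not already at low RR */
	if (dev_priv->drrs.dp &&
	    dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->
					panel.downclock_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}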
5613
b33a2815
VK
5614/**
5615 * intel_edp_drrs_enable - init drrs struct if supported
5616 * @intel_dp: DP struct
5617 *
5618 * Initializes frontbuffer_bits and drrs.dp
5619 */
c395578e
VK
5620void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5621{
5622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5623 struct drm_i915_private *dev_priv = dev->dev_private;
5624 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5625 struct drm_crtc *crtc = dig_port->base.base.crtc;
5626 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5627
5628 if (!intel_crtc->config->has_drrs) {
5629 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5630 return;
5631 }
5632
5633 mutex_lock(&dev_priv->drrs.mutex);
5634 if (WARN_ON(dev_priv->drrs.dp)) {
5635 DRM_ERROR("DRRS already enabled\n");
5636 goto unlock;
5637 }
5638
5639 dev_priv->drrs.busy_frontbuffer_bits = 0;
5640
5641 dev_priv->drrs.dp = intel_dp;
5642
5643unlock:
5644 mutex_unlock(&dev_priv->drrs.mutex);
5645}
5646
b33a2815
VK
5647/**
5648 * intel_edp_drrs_disable - Disable DRRS
5649 * @intel_dp: DP struct
5650 *
5651 */
c395578e
VK
5652void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5653{
5654 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5655 struct drm_i915_private *dev_priv = dev->dev_private;
5656 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5657 struct drm_crtc *crtc = dig_port->base.base.crtc;
5658 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5659
5660 if (!intel_crtc->config->has_drrs)
5661 return;
5662
5663 mutex_lock(&dev_priv->drrs.mutex);
5664 if (!dev_priv->drrs.dp) {
5665 mutex_unlock(&dev_priv->drrs.mutex);
5666 return;
5667 }
5668
5669 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5670 intel_dp_set_drrs_state(dev_priv->dev,
5671 intel_dp->attached_connector->panel.
5672 fixed_mode->vrefresh);
5673
5674 dev_priv->drrs.dp = NULL;
5675 mutex_unlock(&dev_priv->drrs.mutex);
5676
5677 cancel_delayed_work_sync(&dev_priv->drrs.work);
5678}
5679
4e9ac947
VK
5680static void intel_edp_drrs_downclock_work(struct work_struct *work)
5681{
5682 struct drm_i915_private *dev_priv =
5683 container_of(work, typeof(*dev_priv), drrs.work.work);
5684 struct intel_dp *intel_dp;
5685
5686 mutex_lock(&dev_priv->drrs.mutex);
5687
5688 intel_dp = dev_priv->drrs.dp;
5689
5690 if (!intel_dp)
5691 goto unlock;
5692
439d7ac0 5693 /*
4e9ac947
VK
5694 * The delayed work can race with an invalidate hence we need to
5695 * recheck.
439d7ac0
PB
5696 */
5697
4e9ac947
VK
5698 if (dev_priv->drrs.busy_frontbuffer_bits)
5699 goto unlock;
439d7ac0 5700
4e9ac947
VK
5701 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5702 intel_dp_set_drrs_state(dev_priv->dev,
5703 intel_dp->attached_connector->panel.
5704 downclock_mode->vrefresh);
439d7ac0 5705
4e9ac947 5706unlock:
4e9ac947 5707 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5708}
5709
b33a2815 5710/**
0ddfd203 5711 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5712 * @dev: DRM device
5713 * @frontbuffer_bits: frontbuffer plane tracking bits
5714 *
0ddfd203
R
5715 * This function gets called every time rendering on the given planes starts.
5716 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5717 *
5718 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5719 */
a93fad0f
VK
5720void intel_edp_drrs_invalidate(struct drm_device *dev,
5721 unsigned frontbuffer_bits)
5722{
5723 struct drm_i915_private *dev_priv = dev->dev_private;
5724 struct drm_crtc *crtc;
5725 enum pipe pipe;
5726
9da7d693 5727 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5728 return;
5729
88f933a8 5730 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5731
a93fad0f 5732 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5733 if (!dev_priv->drrs.dp) {
5734 mutex_unlock(&dev_priv->drrs.mutex);
5735 return;
5736 }
5737
a93fad0f
VK
5738 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5739 pipe = to_intel_crtc(crtc)->pipe;
5740
c1d038c6
DV
5741 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5742 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5743
0ddfd203 5744 /* invalidate means busy screen hence upclock */
c1d038c6 5745 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5746 intel_dp_set_drrs_state(dev_priv->dev,
5747 dev_priv->drrs.dp->attached_connector->panel.
5748 fixed_mode->vrefresh);
a93fad0f 5749
a93fad0f
VK
5750 mutex_unlock(&dev_priv->drrs.mutex);
5751}
5752
b33a2815 5753/**
0ddfd203 5754 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5755 * @dev: DRM device
5756 * @frontbuffer_bits: frontbuffer plane tracking bits
5757 *
0ddfd203
R
5758 * This function gets called every time rendering on the given planes has
5759 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5760 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5761 * if no other planes are dirty.
b33a2815
VK
5762 *
5763 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5764 */
a93fad0f
VK
5765void intel_edp_drrs_flush(struct drm_device *dev,
5766 unsigned frontbuffer_bits)
5767{
5768 struct drm_i915_private *dev_priv = dev->dev_private;
5769 struct drm_crtc *crtc;
5770 enum pipe pipe;
5771
9da7d693 5772 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5773 return;
5774
88f933a8 5775 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5776
a93fad0f 5777 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5778 if (!dev_priv->drrs.dp) {
5779 mutex_unlock(&dev_priv->drrs.mutex);
5780 return;
5781 }
5782
a93fad0f
VK
5783 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5784 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5785
5786 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5787 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5788
0ddfd203 5789 /* flush means busy screen hence upclock */
c1d038c6 5790 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5791 intel_dp_set_drrs_state(dev_priv->dev,
5792 dev_priv->drrs.dp->attached_connector->panel.
5793 fixed_mode->vrefresh);
5794
5795 /*
5796 * flush also means no more activity hence schedule downclock, if all
5797 * other fbs are quiescent too
5798 */
5799 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5800 schedule_delayed_work(&dev_priv->drrs.work,
5801 msecs_to_jiffies(1000));
5802 mutex_unlock(&dev_priv->drrs.mutex);
5803}
5804
b33a2815
VK
5805/**
5806 * DOC: Display Refresh Rate Switching (DRRS)
5807 *
5808 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5809 * which enables switching between low and high refresh rates
5810 * dynamically, based on the usage scenario. This feature is applicable
5811 * for internal panels.
5812 *
5813 * Indication that the panel supports DRRS is given by the panel EDID, which
5814 * would list multiple refresh rates for one resolution.
5815 *
5816 * DRRS is of 2 types - static and seamless.
5817 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5818 * (may appear as a blink on screen) and is used in dock-undock scenario.
5819 * Seamless DRRS involves changing RR without any visual effect to the user
5820 * and can be used during normal system usage. This is done by programming
5821 * certain registers.
5822 *
5823 * Support for static/seamless DRRS may be indicated in the VBT based on
5824 * inputs from the panel spec.
5825 *
5826 * DRRS saves power by switching to low RR based on usage scenarios.
5827 *
5828 * eDP DRRS:-
5829 * The implementation is based on frontbuffer tracking.
5830 * When there is a disturbance on the screen triggered by user activity or a
5831 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5832 * When there is no movement on screen, after a timeout of 1 second, a switch
5833 * to low RR is made.
5834 * For integration with frontbuffer tracking code,
5835 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5836 *
5837 * DRRS can be further extended to support other internal panels and also
5838 * the scenario of video playback wherein RR is set based on the rate
5839 * requested by userspace.
5840 */
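
A hedged integration sketch of the frontbuffer-tracking hand-off described above: a hypothetical caller brackets CPU rendering with the two entry points, so the panel runs at the high refresh rate while the screen is dirty and the delayed work drops it back after about a second of idleness.

/* Hypothetical caller, for illustration; the real one is the frontbuffer
 * tracking code outside this file. */
static void example_frontbuffer_write(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* screen is about to get dirty: force the high refresh rate */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... CPU rendering to the frontbuffer happens here ... */

	/* done: re-arm the 1 second idleness timer that downclocks again */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}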
5841
5842/**
5843 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5844 * @intel_connector: eDP connector
5845 * @fixed_mode: preferred mode of panel
5846 *
5847 * This function is called only once at driver load to initialize basic
5848 * DRRS stuff.
5849 *
5850 * Returns:
5851 * Downclock mode if panel supports it, else return NULL.
5852 * DRRS support is determined by the presence of downclock mode (apart
5853 * from VBT setting).
5854 */
4f9db5b5 5855static struct drm_display_mode *
96178eeb
VK
5856intel_dp_drrs_init(struct intel_connector *intel_connector,
5857 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5858{
5859 struct drm_connector *connector = &intel_connector->base;
96178eeb 5860 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5861 struct drm_i915_private *dev_priv = dev->dev_private;
5862 struct drm_display_mode *downclock_mode = NULL;
5863
9da7d693
DV
5864 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5865 mutex_init(&dev_priv->drrs.mutex);
5866
4f9db5b5
PB
5867 if (INTEL_INFO(dev)->gen <= 6) {
5868 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5869 return NULL;
5870 }
5871
5872 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5873 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5874 return NULL;
5875 }
5876
5877 downclock_mode = intel_find_panel_downclock
5878 (dev, fixed_mode, connector);
5879
5880 if (!downclock_mode) {
a1d26342 5881 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5882 return NULL;
5883 }
5884
96178eeb 5885 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5886
96178eeb 5887 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5888 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5889 return downclock_mode;
5890}
5891
ed92f0b2 5892static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5893 struct intel_connector *intel_connector)
ed92f0b2
PZ
5894{
5895 struct drm_connector *connector = &intel_connector->base;
5896 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5897 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5898 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5899 struct drm_i915_private *dev_priv = dev->dev_private;
5900 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5901 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5902 bool has_dpcd;
5903 struct drm_display_mode *scan;
5904 struct edid *edid;
6517d273 5905 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5906
5907 if (!is_edp(intel_dp))
5908 return true;
5909
49e6bc51
VS
5910 pps_lock(intel_dp);
5911 intel_edp_panel_vdd_sanitize(intel_dp);
5912 pps_unlock(intel_dp);
63635217 5913
ed92f0b2 5914 /* Cache DPCD and EDID for edp. */
ed92f0b2 5915 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5916
5917 if (has_dpcd) {
5918 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5919 dev_priv->no_aux_handshake =
5920 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5921 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5922 } else {
5923 /* if this fails, presume the device is a ghost */
5924 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5925 return false;
5926 }
5927
5928 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5929 pps_lock(intel_dp);
36b5f425 5930 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5931 pps_unlock(intel_dp);
ed92f0b2 5932
060c8778 5933 mutex_lock(&dev->mode_config.mutex);
0b99836f 5934 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5935 if (edid) {
5936 if (drm_add_edid_modes(connector, edid)) {
5937 drm_mode_connector_update_edid_property(connector,
5938 edid);
5939 drm_edid_to_eld(connector, edid);
5940 } else {
5941 kfree(edid);
5942 edid = ERR_PTR(-EINVAL);
5943 }
5944 } else {
5945 edid = ERR_PTR(-ENOENT);
5946 }
5947 intel_connector->edid = edid;
5948
5949 /* prefer fixed mode from EDID if available */
5950 list_for_each_entry(scan, &connector->probed_modes, head) {
5951 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5952 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5953 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5954 intel_connector, fixed_mode);
ed92f0b2
PZ
5955 break;
5956 }
5957 }
5958
5959 /* fallback to VBT if available for eDP */
5960 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5961 fixed_mode = drm_mode_duplicate(dev,
5962 dev_priv->vbt.lfp_lvds_vbt_mode);
5963 if (fixed_mode)
5964 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5965 }
060c8778 5966 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5967
01527b31
CT
5968 if (IS_VALLEYVIEW(dev)) {
5969 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5970 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5971
5972 /*
5973 * Figure out the current pipe for the initial backlight setup.
5974 * If the current pipe isn't valid, try the PPS pipe, and if that
5975 * fails just assume pipe A.
5976 */
5977 if (IS_CHERRYVIEW(dev))
5978 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5979 else
5980 pipe = PORT_TO_PIPE(intel_dp->DP);
5981
5982 if (pipe != PIPE_A && pipe != PIPE_B)
5983 pipe = intel_dp->pps_pipe;
5984
5985 if (pipe != PIPE_A && pipe != PIPE_B)
5986 pipe = PIPE_A;
5987
5988 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5989 pipe_name(pipe));
01527b31
CT
5990 }
5991
4f9db5b5 5992 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5993 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5994 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5995
5996 return true;
5997}
5998
16c25533 5999bool
f0fec3f2
PZ
6000intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6001 struct intel_connector *intel_connector)
a4fc5ed6 6002{
f0fec3f2
PZ
6003 struct drm_connector *connector = &intel_connector->base;
6004 struct intel_dp *intel_dp = &intel_dig_port->dp;
6005 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6006 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 6007 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 6008 enum port port = intel_dig_port->port;
0b99836f 6009 int type;
a4fc5ed6 6010
a4a5d2f8
VS
6011 intel_dp->pps_pipe = INVALID_PIPE;
6012
ec5b01dd 6013 /* intel_dp vfuncs */
b6b5e383
DL
6014 if (INTEL_INFO(dev)->gen >= 9)
6015 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6016 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
6017 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6018 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6019 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6020 else if (HAS_PCH_SPLIT(dev))
6021 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6022 else
6023 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6024
b9ca5fad
DL
6025 if (INTEL_INFO(dev)->gen >= 9)
6026 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6027 else
6028 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 6029
0767935e
DV
6030 /* Preserve the current hw state. */
6031 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 6032 intel_dp->attached_connector = intel_connector;
3d3dc149 6033
3b32a35b 6034 if (intel_dp_is_edp(dev, port))
b329530c 6035 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
6036 else
6037 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 6038
f7d24902
ID
6039 /*
6040 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6041 * for DP the encoder type can be set by the caller to
6042 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6043 */
6044 if (type == DRM_MODE_CONNECTOR_eDP)
6045 intel_encoder->type = INTEL_OUTPUT_EDP;
6046
c17ed5b5
VS
6047 /* eDP only on port B and/or C on vlv/chv */
6048 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6049 port != PORT_B && port != PORT_C))
6050 return false;
6051
e7281eab
ID
6052 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6053 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6054 port_name(port));
6055
b329530c 6056 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
6057 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6058
a4fc5ed6
KP
6059 connector->interlace_allowed = true;
6060 connector->doublescan_allowed = 0;
6061
f0fec3f2 6062 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 6063 edp_panel_vdd_work);
a4fc5ed6 6064
df0e9248 6065 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 6066 drm_connector_register(connector);
a4fc5ed6 6067
affa9354 6068 if (HAS_DDI(dev))
bcbc889b
PZ
6069 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6070 else
6071 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 6072 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 6073
0b99836f 6074 /* Set up the hotplug pin. */
ab9d7c30
PZ
6075 switch (port) {
6076 case PORT_A:
1d843f9d 6077 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
6078 break;
6079 case PORT_B:
1d843f9d 6080 intel_encoder->hpd_pin = HPD_PORT_B;
cf1d5883
SJ
6081 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6082 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
6083 break;
6084 case PORT_C:
1d843f9d 6085 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
6086 break;
6087 case PORT_D:
1d843f9d 6088 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
6089 break;
6090 default:
ad1c0b19 6091 BUG();
5eb08b69
ZW
6092 }
6093
dada1a9f 6094 if (is_edp(intel_dp)) {
773538e8 6095 pps_lock(intel_dp);
1e74a324
VS
6096 intel_dp_init_panel_power_timestamps(intel_dp);
6097 if (IS_VALLEYVIEW(dev))
a4a5d2f8 6098 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 6099 else
36b5f425 6100 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 6101 pps_unlock(intel_dp);
dada1a9f 6102 }
0095e6dc 6103
9d1a1031 6104 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 6105
0e32b39c 6106 /* init MST on ports that can support it */
0c9b3715
JN
6107 if (HAS_DP_MST(dev) &&
6108 (port == PORT_B || port == PORT_C || port == PORT_D))
6109 intel_dp_mst_encoder_init(intel_dig_port,
6110 intel_connector->base.base.id);
0e32b39c 6111
36b5f425 6112 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 6113 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
6114 if (is_edp(intel_dp)) {
6115 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
6116 /*
6117 * vdd might still be enabled due to the delayed vdd off.
6118 * Make sure vdd is actually turned off here.
6119 */
773538e8 6120 pps_lock(intel_dp);
4be73780 6121 edp_panel_vdd_off_sync(intel_dp);
773538e8 6122 pps_unlock(intel_dp);
15b1d171 6123 }
34ea3d38 6124 drm_connector_unregister(connector);
b2f246a8 6125 drm_connector_cleanup(connector);
16c25533 6126 return false;
b2f246a8 6127 }
32f9d658 6128
f684960e
CW
6129 intel_dp_add_properties(intel_dp, connector);
6130
a4fc5ed6
KP
6131 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6132 * 0xd. Failure to do so will result in spurious interrupts being
6133 * generated on the port when a cable is not attached.
6134 */
6135 if (IS_G4X(dev) && !IS_GM45(dev)) {
6136 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6137 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6138 }
16c25533 6139
aa7471d2
JN
6140 i915_debugfs_connector_add(connector);
6141
16c25533 6142 return true;
a4fc5ed6 6143}
f0fec3f2
PZ
6144
6145void
6146intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6147{
13cf5504 6148 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6149 struct intel_digital_port *intel_dig_port;
6150 struct intel_encoder *intel_encoder;
6151 struct drm_encoder *encoder;
6152 struct intel_connector *intel_connector;
6153
b14c5679 6154 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6155 if (!intel_dig_port)
6156 return;
6157
08d9bc92 6158 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
6159 if (!intel_connector) {
6160 kfree(intel_dig_port);
6161 return;
6162 }
6163
6164 intel_encoder = &intel_dig_port->base;
6165 encoder = &intel_encoder->base;
6166
6167 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6168 DRM_MODE_ENCODER_TMDS);
6169
5bfe2ac0 6170 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6171 intel_encoder->disable = intel_disable_dp;
00c09d70 6172 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6173 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6174 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6175 if (IS_CHERRYVIEW(dev)) {
9197c88b 6176 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6177 intel_encoder->pre_enable = chv_pre_enable_dp;
6178 intel_encoder->enable = vlv_enable_dp;
580d3811 6179 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6180 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6181 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6182 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6183 intel_encoder->pre_enable = vlv_pre_enable_dp;
6184 intel_encoder->enable = vlv_enable_dp;
49277c31 6185 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6186 } else {
ecff4f3b
JN
6187 intel_encoder->pre_enable = g4x_pre_enable_dp;
6188 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6189 if (INTEL_INFO(dev)->gen >= 5)
6190 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6191 }
f0fec3f2 6192
174edf1f 6193 intel_dig_port->port = port;
f0fec3f2
PZ
6194 intel_dig_port->dp.output_reg = output_reg;
6195
00c09d70 6196 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6197 if (IS_CHERRYVIEW(dev)) {
6198 if (port == PORT_D)
6199 intel_encoder->crtc_mask = 1 << 2;
6200 else
6201 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6202 } else {
6203 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6204 }
bc079e8b 6205 intel_encoder->cloneable = 0;
f0fec3f2 6206
13cf5504 6207 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6208 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6209
15b1d171
PZ
6210 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6211 drm_encoder_cleanup(encoder);
6212 kfree(intel_dig_port);
b2f246a8 6213 kfree(intel_connector);
15b1d171 6214 }
f0fec3f2 6215}
0e32b39c
DA
6216
6217void intel_dp_mst_suspend(struct drm_device *dev)
6218{
6219 struct drm_i915_private *dev_priv = dev->dev_private;
6220 int i;
6221
6222 /* disable MST */
6223 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6224 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6225 if (!intel_dig_port)
6226 continue;
6227
6228 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6229 if (!intel_dig_port->dp.can_mst)
6230 continue;
6231 if (intel_dig_port->dp.is_mst)
6232 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6233 }
6234 }
6235}
6236
6237void intel_dp_mst_resume(struct drm_device *dev)
6238{
6239 struct drm_i915_private *dev_priv = dev->dev_private;
6240 int i;
6241
6242 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6243 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6244 if (!intel_dig_port)
6245 continue;
6246 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6247 int ret;
6248
6249 if (!intel_dig_port->dp.can_mst)
6250 continue;
6251
6252 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6253 if (ret != 0) {
6254 intel_dp_check_mst_status(&intel_dig_port->dp);
6255 }
6256 }
6257 }
6258}