drivers/gpu/drm/i915/intel_dp.c
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf
CML
50struct dp_link_dpll {
51 int link_bw;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { DP_LINK_BW_1_62,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { DP_LINK_BW_2_7,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { DP_LINK_BW_1_62,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { DP_LINK_BW_2_7,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5
CML
69static const struct dp_link_dpll vlv_dpll[] = {
70 { DP_LINK_BW_1_62,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
72 { DP_LINK_BW_2_7,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4, which has more link rates.
78 * Only the fixed rates are provided below; the variable rates are excluded.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming fractional division for m2.
83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
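/*
 * Worked example of the fixed-point encoding above: for the DP_LINK_BW_1_62
 * entry, m2 = 0x819999a decodes to m2_int = 0x819999a >> 22 = 32 and
 * m2_fraction = 0x819999a & ((1 << 22) - 1) = 1677722, i.e. m2 is roughly
 * 32.4 (32 + 1677722/4194304). The 0x6c00000 value used by the 2.7 and 5.4
 * entries decodes to m2_int = 27 with a zero fraction, matching the comments.
 */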
637a9c63
SJ
93
94static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15 95 324000, 432000, 540000 };
fe51bfb9
VS
96static const int chv_rates[] = { 162000, 202500, 210000, 216000,
97 243000, 270000, 324000, 405000,
98 420000, 432000, 540000 };
f4896f15 99static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 100
cfcb0fc9
JB
101/**
102 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103 * @intel_dp: DP struct
104 *
105 * If a CPU or PCH DP output is attached to an eDP panel, this function
106 * will return true, and false otherwise.
107 */
108static bool is_edp(struct intel_dp *intel_dp)
109{
da63a9f2
PZ
110 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111
112 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
113}
114
68b4d824 115static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 116{
68b4d824
ID
117 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118
119 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
120}
121
df0e9248
CW
122static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123{
fa90ecef 124 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
125}
126
ea5b213a 127static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 128static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 129static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 130static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
131static void vlv_steal_power_sequencer(struct drm_device *dev,
132 enum pipe pipe);
a4fc5ed6 133
ed4e9c1d
VS
134static int
135intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 136{
7183dc29 137 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
138
139 switch (max_link_bw) {
140 case DP_LINK_BW_1_62:
141 case DP_LINK_BW_2_7:
1db10e28 142 case DP_LINK_BW_5_4:
d4eead50 143 break;
a4fc5ed6 144 default:
d4eead50
ID
145 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 max_link_bw);
a4fc5ed6
KP
147 max_link_bw = DP_LINK_BW_1_62;
148 break;
149 }
150 return max_link_bw;
151}
152
eeb6324d
PZ
153static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154{
155 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156 struct drm_device *dev = intel_dig_port->base.base.dev;
157 u8 source_max, sink_max;
158
159 source_max = 4;
160 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162 source_max = 2;
163
164 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165
166 return min(source_max, sink_max);
167}
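/*
 * Example of the clamping above: on a DDI platform where port A was strapped
 * without DDI_A_4_LANES, source_max is 2, so even if the sink's DPCD
 * advertises 4 lanes the function returns min(2, 4) = 2.
 */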
168
cd9dde44
AJ
169/*
170 * The units on the numbers in the next two are... bizarre. Examples will
171 * make it clearer; this one parallels an example in the eDP spec.
172 *
173 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 *
175 * 270000 * 1 * 8 / 10 == 216000
176 *
177 * The actual data capacity of that configuration is 2.16Gbit/s, so the
178 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
179 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180 * 119000. At 18bpp that's 2142000 kilobits per second.
181 *
182 * Thus the strange-looking division by 10 in intel_dp_link_required, to
183 * get the result in decakilobits instead of kilobits.
184 */
185
a4fc5ed6 186static int
c898261c 187intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 188{
cd9dde44 189 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
190}
191
fe27d53e
DA
192static int
193intel_dp_max_data_rate(int max_link_clock, int max_lanes)
194{
195 return (max_link_clock * max_lanes * 8) / 10;
196}
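/*
 * Worked example of the two helpers above (mode chosen purely for
 * illustration): a 1920x1080@60 mode has a pixel clock of 148500 kHz, so at
 * 18bpp intel_dp_link_required() is (148500 * 18 + 9) / 10 = 267300, while a
 * 2.7GHz link with 2 lanes gives intel_dp_max_data_rate() =
 * (270000 * 2 * 8) / 10 = 432000; both sides are in the "decakilobit" units
 * described above, and the mode fits.
 */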
197
c19de8eb 198static enum drm_mode_status
a4fc5ed6
KP
199intel_dp_mode_valid(struct drm_connector *connector,
200 struct drm_display_mode *mode)
201{
df0e9248 202 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
203 struct intel_connector *intel_connector = to_intel_connector(connector);
204 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
205 int target_clock = mode->clock;
206 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 207
dd06f90e
JN
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
210 return MODE_PANEL;
211
dd06f90e 212 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 213 return MODE_PANEL;
03afc4a2
DV
214
215 target_clock = fixed_mode->clock;
7de56f43
ZY
216 }
217
50fec21a 218 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 219 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
220
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
223
224 if (mode_rate > max_rate)
c4867936 225 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
226
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
229
0af78a2b
DV
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
232
a4fc5ed6
KP
233 return MODE_OK;
234}
235
a4f1289e 236uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
237{
238 int i;
239 uint32_t v = 0;
240
241 if (src_bytes > 4)
242 src_bytes = 4;
243 for (i = 0; i < src_bytes; i++)
244 v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 return v;
246}
247
c2af70e2 248static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
249{
250 int i;
251 if (dst_bytes > 4)
252 dst_bytes = 4;
253 for (i = 0; i < dst_bytes; i++)
254 dst[i] = src >> ((3-i) * 8);
255}
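/*
 * Example of the byte ordering used by the two helpers above:
 * intel_dp_pack_aux() on the bytes { 0x12, 0x34, 0x56, 0x78 } returns
 * 0x12345678 (first byte in the most significant position), and
 * intel_dp_unpack_aux(0x12345678, dst, 4) writes the same four bytes back
 * in the same order.
 */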
256
fb0f8fbf
KP
257/* hrawclock is 1/4 the FSB frequency */
258static int
259intel_hrawclk(struct drm_device *dev)
260{
261 struct drm_i915_private *dev_priv = dev->dev_private;
262 uint32_t clkcfg;
263
9473c8f4
VP
264 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
265 if (IS_VALLEYVIEW(dev))
266 return 200;
267
fb0f8fbf
KP
268 clkcfg = I915_READ(CLKCFG);
269 switch (clkcfg & CLKCFG_FSB_MASK) {
270 case CLKCFG_FSB_400:
271 return 100;
272 case CLKCFG_FSB_533:
273 return 133;
274 case CLKCFG_FSB_667:
275 return 166;
276 case CLKCFG_FSB_800:
277 return 200;
278 case CLKCFG_FSB_1067:
279 return 266;
280 case CLKCFG_FSB_1333:
281 return 333;
282 /* these two are just a guess; one of them might be right */
283 case CLKCFG_FSB_1600:
284 case CLKCFG_FSB_1600_ALT:
285 return 400;
286 default:
287 return 133;
288 }
289}
290
bf13e81b
JN
291static void
292intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 293 struct intel_dp *intel_dp);
bf13e81b
JN
294static void
295intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 296 struct intel_dp *intel_dp);
bf13e81b 297
773538e8
VS
298static void pps_lock(struct intel_dp *intel_dp)
299{
300 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
301 struct intel_encoder *encoder = &intel_dig_port->base;
302 struct drm_device *dev = encoder->base.dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 enum intel_display_power_domain power_domain;
305
306 /*
307 * See vlv_power_sequencer_reset() for why we need
308 * a power domain reference here.
309 */
310 power_domain = intel_display_port_power_domain(encoder);
311 intel_display_power_get(dev_priv, power_domain);
312
313 mutex_lock(&dev_priv->pps_mutex);
314}
315
316static void pps_unlock(struct intel_dp *intel_dp)
317{
318 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
319 struct intel_encoder *encoder = &intel_dig_port->base;
320 struct drm_device *dev = encoder->base.dev;
321 struct drm_i915_private *dev_priv = dev->dev_private;
322 enum intel_display_power_domain power_domain;
323
324 mutex_unlock(&dev_priv->pps_mutex);
325
326 power_domain = intel_display_port_power_domain(encoder);
327 intel_display_power_put(dev_priv, power_domain);
328}
329
961a0db0
VS
330static void
331vlv_power_sequencer_kick(struct intel_dp *intel_dp)
332{
333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334 struct drm_device *dev = intel_dig_port->base.base.dev;
335 struct drm_i915_private *dev_priv = dev->dev_private;
336 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 337 bool pll_enabled;
961a0db0
VS
338 uint32_t DP;
339
340 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
341 "skipping pipe %c power sequencer kick due to port %c being active\n",
342 pipe_name(pipe), port_name(intel_dig_port->port)))
343 return;
344
345 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
346 pipe_name(pipe), port_name(intel_dig_port->port));
347
348 /* Preserve the BIOS-computed detected bit. This is
349 * supposed to be read-only.
350 */
351 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
352 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
353 DP |= DP_PORT_WIDTH(1);
354 DP |= DP_LINK_TRAIN_PAT_1;
355
356 if (IS_CHERRYVIEW(dev))
357 DP |= DP_PIPE_SELECT_CHV(pipe);
358 else if (pipe == PIPE_B)
359 DP |= DP_PIPEB_SELECT;
360
d288f65f
VS
361 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
362
363 /*
364 * The DPLL for the pipe must be enabled for this to work.
365 * So enable it temporarily if it's not already enabled.
366 */
367 if (!pll_enabled)
368 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
369 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
370
961a0db0
VS
371 /*
372 * Similar magic as in intel_dp_enable_port().
373 * We _must_ do this port enable + disable trick
374 * to make this power sequencer lock onto the port.
375 * Otherwise even the VDD force bit won't work.
376 */
377 I915_WRITE(intel_dp->output_reg, DP);
378 POSTING_READ(intel_dp->output_reg);
379
380 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
381 POSTING_READ(intel_dp->output_reg);
382
383 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
384 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
385
386 if (!pll_enabled)
387 vlv_force_pll_off(dev, pipe);
961a0db0
VS
388}
389
bf13e81b
JN
390static enum pipe
391vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
392{
393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
394 struct drm_device *dev = intel_dig_port->base.base.dev;
395 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
396 struct intel_encoder *encoder;
397 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 398 enum pipe pipe;
bf13e81b 399
e39b999a 400 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 401
a8c3344e
VS
402 /* We should never land here with regular DP ports */
403 WARN_ON(!is_edp(intel_dp));
404
a4a5d2f8
VS
405 if (intel_dp->pps_pipe != INVALID_PIPE)
406 return intel_dp->pps_pipe;
407
408 /*
409 * We don't have a power sequencer currently.
410 * Pick one that's not used by other ports.
411 */
412 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
413 base.head) {
414 struct intel_dp *tmp;
415
416 if (encoder->type != INTEL_OUTPUT_EDP)
417 continue;
418
419 tmp = enc_to_intel_dp(&encoder->base);
420
421 if (tmp->pps_pipe != INVALID_PIPE)
422 pipes &= ~(1 << tmp->pps_pipe);
423 }
424
425 /*
426 * Didn't find one. This should not happen since there
427 * are two power sequencers and up to two eDP ports.
428 */
429 if (WARN_ON(pipes == 0))
a8c3344e
VS
430 pipe = PIPE_A;
431 else
432 pipe = ffs(pipes) - 1;
a4a5d2f8 433
a8c3344e
VS
434 vlv_steal_power_sequencer(dev, pipe);
435 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
436
437 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
438 pipe_name(intel_dp->pps_pipe),
439 port_name(intel_dig_port->port));
440
441 /* init power sequencer on this pipe and port */
36b5f425
VS
442 intel_dp_init_panel_power_sequencer(dev, intel_dp);
443 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 444
961a0db0
VS
445 /*
446 * Even VDD force doesn't work until we've made
447 * the power sequencer lock onto the port.
448 */
449 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
450
451 return intel_dp->pps_pipe;
452}
453
6491ab27
VS
454typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
455 enum pipe pipe);
456
457static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
461}
462
463static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
467}
468
469static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 enum pipe pipe)
471{
472 return true;
473}
bf13e81b 474
a4a5d2f8 475static enum pipe
6491ab27
VS
476vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
477 enum port port,
478 vlv_pipe_check pipe_check)
a4a5d2f8
VS
479{
480 enum pipe pipe;
bf13e81b 481
bf13e81b
JN
482 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
483 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
484 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
485
486 if (port_sel != PANEL_PORT_SELECT_VLV(port))
487 continue;
488
6491ab27
VS
489 if (!pipe_check(dev_priv, pipe))
490 continue;
491
a4a5d2f8 492 return pipe;
bf13e81b
JN
493 }
494
a4a5d2f8
VS
495 return INVALID_PIPE;
496}
497
498static void
499vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
500{
501 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
502 struct drm_device *dev = intel_dig_port->base.base.dev;
503 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
504 enum port port = intel_dig_port->port;
505
506 lockdep_assert_held(&dev_priv->pps_mutex);
507
508 /* try to find a pipe with this port selected */
6491ab27
VS
509 /* first pick one where the panel is on */
510 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
511 vlv_pipe_has_pp_on);
512 /* didn't find one? pick one where vdd is on */
513 if (intel_dp->pps_pipe == INVALID_PIPE)
514 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 vlv_pipe_has_vdd_on);
516 /* didn't find one? pick one with just the correct port */
517 if (intel_dp->pps_pipe == INVALID_PIPE)
518 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
519 vlv_pipe_any);
a4a5d2f8
VS
520
521 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
522 if (intel_dp->pps_pipe == INVALID_PIPE) {
523 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
524 port_name(port));
525 return;
bf13e81b
JN
526 }
527
a4a5d2f8
VS
528 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
529 port_name(port), pipe_name(intel_dp->pps_pipe));
530
36b5f425
VS
531 intel_dp_init_panel_power_sequencer(dev, intel_dp);
532 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
533}
534
773538e8
VS
535void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
536{
537 struct drm_device *dev = dev_priv->dev;
538 struct intel_encoder *encoder;
539
540 if (WARN_ON(!IS_VALLEYVIEW(dev)))
541 return;
542
543 /*
544 * We can't grab pps_mutex here due to deadlock with power_domain
545 * mutex when power_domain functions are called while holding pps_mutex.
546 * That also means that in order to use pps_pipe the code needs to
547 * hold both a power domain reference and pps_mutex, and the power domain
548 * reference get/put must be done while _not_ holding pps_mutex.
549 * pps_{lock,unlock}() do these steps in the correct order, so they
550 * should always be used.
551 */
552
553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
554 struct intel_dp *intel_dp;
555
556 if (encoder->type != INTEL_OUTPUT_EDP)
557 continue;
558
559 intel_dp = enc_to_intel_dp(&encoder->base);
560 intel_dp->pps_pipe = INVALID_PIPE;
561 }
bf13e81b
JN
562}
563
564static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
565{
566 struct drm_device *dev = intel_dp_to_dev(intel_dp);
567
568 if (HAS_PCH_SPLIT(dev))
569 return PCH_PP_CONTROL;
570 else
571 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
572}
573
574static u32 _pp_stat_reg(struct intel_dp *intel_dp)
575{
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577
578 if (HAS_PCH_SPLIT(dev))
579 return PCH_PP_STATUS;
580 else
581 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
582}
583
01527b31
CT
584/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
585 This function is only applicable when the panel PM state is not to be tracked */
586static int edp_notify_handler(struct notifier_block *this, unsigned long code,
587 void *unused)
588{
589 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
590 edp_notifier);
591 struct drm_device *dev = intel_dp_to_dev(intel_dp);
592 struct drm_i915_private *dev_priv = dev->dev_private;
593 u32 pp_div;
594 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
595
596 if (!is_edp(intel_dp) || code != SYS_RESTART)
597 return 0;
598
773538e8 599 pps_lock(intel_dp);
e39b999a 600
01527b31 601 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
602 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
603
01527b31
CT
604 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
605 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
606 pp_div = I915_READ(pp_div_reg);
607 pp_div &= PP_REFERENCE_DIVIDER_MASK;
608
609 /* 0x1F write to PP_DIV_REG sets max cycle delay */
610 I915_WRITE(pp_div_reg, pp_div | 0x1F);
611 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
612 msleep(intel_dp->panel_power_cycle_delay);
613 }
614
773538e8 615 pps_unlock(intel_dp);
e39b999a 616
01527b31
CT
617 return 0;
618}
619
4be73780 620static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 621{
30add22d 622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
623 struct drm_i915_private *dev_priv = dev->dev_private;
624
e39b999a
VS
625 lockdep_assert_held(&dev_priv->pps_mutex);
626
9a42356b
VS
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
629 return false;
630
bf13e81b 631 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
632}
633
4be73780 634static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
637 struct drm_i915_private *dev_priv = dev->dev_private;
638
e39b999a
VS
639 lockdep_assert_held(&dev_priv->pps_mutex);
640
9a42356b
VS
641 if (IS_VALLEYVIEW(dev) &&
642 intel_dp->pps_pipe == INVALID_PIPE)
643 return false;
644
773538e8 645 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
646}
647
9b984dae
KP
648static void
649intel_dp_check_edp(struct intel_dp *intel_dp)
650{
30add22d 651 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 652 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 653
9b984dae
KP
654 if (!is_edp(intel_dp))
655 return;
453c5420 656
4be73780 657 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
658 WARN(1, "eDP powered off while attempting aux channel communication.\n");
659 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
660 I915_READ(_pp_stat_reg(intel_dp)),
661 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
662 }
663}
664
9ee32fea
DV
665static uint32_t
666intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
667{
668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
669 struct drm_device *dev = intel_dig_port->base.base.dev;
670 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 671 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
672 uint32_t status;
673 bool done;
674
ef04f00d 675#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 676 if (has_aux_irq)
b18ac466 677 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 678 msecs_to_jiffies_timeout(10));
9ee32fea
DV
679 else
680 done = wait_for_atomic(C, 10) == 0;
681 if (!done)
682 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 has_aux_irq);
684#undef C
685
686 return status;
687}
688
ec5b01dd 689static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 690{
174edf1f
PZ
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 693
ec5b01dd
DL
694 /*
695 * The clock divider is based off the hrawclk, and we would like it to run
696 * at 2MHz. So take the hrawclk value, divide it by 2, and use that.
a4fc5ed6 697 */
ec5b01dd
DL
698 return index ? 0 : intel_hrawclk(dev) / 2;
699}
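/*
 * E.g. with an 800MHz FSB, intel_hrawclk() above returns 200 (hrawclk is
 * FSB/4), so the divider is 200 / 2 = 100 and the AUX channel runs at
 * roughly 200MHz / 100 = 2MHz, as the comment asks for.
 */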
700
701static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
702{
703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 705 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
706
707 if (index)
708 return 0;
709
710 if (intel_dig_port->port == PORT_A) {
469d4b2a 711 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
712 } else {
713 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
714 }
715}
716
717static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
718{
719 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720 struct drm_device *dev = intel_dig_port->base.base.dev;
721 struct drm_i915_private *dev_priv = dev->dev_private;
722
723 if (intel_dig_port->port == PORT_A) {
724 if (index)
725 return 0;
1652d19e 726 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
727 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728 /* Workaround for non-ULT HSW */
bc86625a
CW
729 switch (index) {
730 case 0: return 63;
731 case 1: return 72;
732 default: return 0;
733 }
ec5b01dd 734 } else {
bc86625a 735 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 736 }
b84a1cf8
RV
737}
738
ec5b01dd
DL
739static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
740{
741 return index ? 0 : 100;
742}
743
b6b5e383
DL
744static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
745{
746 /*
747 * SKL doesn't need us to program the AUX clock divider (Hardware will
748 * derive the clock from CDCLK automatically). We still implement the
749 * get_aux_clock_divider vfunc to plug into the existing code.
750 */
751 return index ? 0 : 1;
752}
753
5ed12a19
DL
754static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
755 bool has_aux_irq,
756 int send_bytes,
757 uint32_t aux_clock_divider)
758{
759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760 struct drm_device *dev = intel_dig_port->base.base.dev;
761 uint32_t precharge, timeout;
762
763 if (IS_GEN6(dev))
764 precharge = 3;
765 else
766 precharge = 5;
767
768 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
770 else
771 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
772
773 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 774 DP_AUX_CH_CTL_DONE |
5ed12a19 775 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 776 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 777 timeout |
788d4433 778 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
779 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 781 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
782}
783
b9ca5fad
DL
784static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785 bool has_aux_irq,
786 int send_bytes,
787 uint32_t unused)
788{
789 return DP_AUX_CH_CTL_SEND_BUSY |
790 DP_AUX_CH_CTL_DONE |
791 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792 DP_AUX_CH_CTL_TIME_OUT_ERROR |
793 DP_AUX_CH_CTL_TIME_OUT_1600us |
794 DP_AUX_CH_CTL_RECEIVE_ERROR |
795 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
797}
798
b84a1cf8
RV
799static int
800intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 801 const uint8_t *send, int send_bytes,
b84a1cf8
RV
802 uint8_t *recv, int recv_size)
803{
804 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
805 struct drm_device *dev = intel_dig_port->base.base.dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
807 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
808 uint32_t ch_data = ch_ctl + 4;
bc86625a 809 uint32_t aux_clock_divider;
b84a1cf8
RV
810 int i, ret, recv_bytes;
811 uint32_t status;
5ed12a19 812 int try, clock = 0;
4e6b788c 813 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
814 bool vdd;
815
773538e8 816 pps_lock(intel_dp);
e39b999a 817
72c3500a
VS
818 /*
819 * We will be called with VDD already enabled for dpcd/edid/oui reads.
820 * In such cases we want to leave VDD enabled and it's up to the upper layers
821 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
822 * ourselves.
823 */
1e0560e0 824 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
825
826 /* dp aux is extremely sensitive to irq latency, hence request the
827 * lowest possible wakeup latency and so prevent the cpu from going into
828 * deep sleep states.
829 */
830 pm_qos_update_request(&dev_priv->pm_qos, 0);
831
832 intel_dp_check_edp(intel_dp);
5eb08b69 833
c67a470b
PZ
834 intel_aux_display_runtime_get(dev_priv);
835
11bee43e
JB
836 /* Try to wait for any previous AUX channel activity */
837 for (try = 0; try < 3; try++) {
ef04f00d 838 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
839 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
840 break;
841 msleep(1);
842 }
843
844 if (try == 3) {
845 WARN(1, "dp_aux_ch not started status 0x%08x\n",
846 I915_READ(ch_ctl));
9ee32fea
DV
847 ret = -EBUSY;
848 goto out;
4f7f7b7e
CW
849 }
850
46a5ae9f
PZ
851 /* Only 5 data registers! */
852 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853 ret = -E2BIG;
854 goto out;
855 }
856
ec5b01dd 857 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
858 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859 has_aux_irq,
860 send_bytes,
861 aux_clock_divider);
5ed12a19 862
bc86625a
CW
863 /* Must try at least 3 times according to DP spec */
864 for (try = 0; try < 5; try++) {
865 /* Load the send data into the aux channel data registers */
866 for (i = 0; i < send_bytes; i += 4)
867 I915_WRITE(ch_data + i,
a4f1289e
RV
868 intel_dp_pack_aux(send + i,
869 send_bytes - i));
bc86625a
CW
870
871 /* Send the command and wait for it to complete */
5ed12a19 872 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
873
874 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
875
876 /* Clear done status and any errors */
877 I915_WRITE(ch_ctl,
878 status |
879 DP_AUX_CH_CTL_DONE |
880 DP_AUX_CH_CTL_TIME_OUT_ERROR |
881 DP_AUX_CH_CTL_RECEIVE_ERROR);
882
74ebf294 883 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 884 continue;
74ebf294
TP
885
886 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
887 * 400us delay required for errors and timeouts
888 * Timeout errors from the HW already meet this
889 * requirement so skip to next iteration
890 */
891 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
892 usleep_range(400, 500);
bc86625a 893 continue;
74ebf294 894 }
bc86625a
CW
895 if (status & DP_AUX_CH_CTL_DONE)
896 break;
897 }
4f7f7b7e 898 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
899 break;
900 }
901
a4fc5ed6 902 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 903 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
904 ret = -EBUSY;
905 goto out;
a4fc5ed6
KP
906 }
907
908 /* Check for timeout or receive error.
909 * Timeouts occur when the sink is not connected
910 */
a5b3da54 911 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 912 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
913 ret = -EIO;
914 goto out;
a5b3da54 915 }
1ae8c0a5
KP
916
917 /* Timeouts occur when the device isn't connected, so they're
918 * "normal" -- don't fill the kernel log with these */
a5b3da54 919 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 920 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
921 ret = -ETIMEDOUT;
922 goto out;
a4fc5ed6
KP
923 }
924
925 /* Unload any bytes sent back from the other side */
926 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
927 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
928 if (recv_bytes > recv_size)
929 recv_bytes = recv_size;
0206e353 930
4f7f7b7e 931 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
932 intel_dp_unpack_aux(I915_READ(ch_data + i),
933 recv + i, recv_bytes - i);
a4fc5ed6 934
9ee32fea
DV
935 ret = recv_bytes;
936out:
937 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 938 intel_aux_display_runtime_put(dev_priv);
9ee32fea 939
884f19e9
JN
940 if (vdd)
941 edp_panel_vdd_off(intel_dp, false);
942
773538e8 943 pps_unlock(intel_dp);
e39b999a 944
9ee32fea 945 return ret;
a4fc5ed6
KP
946}
947
a6c8aff0
JN
948#define BARE_ADDRESS_SIZE 3
949#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
950static ssize_t
951intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 952{
9d1a1031
JN
953 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
954 uint8_t txbuf[20], rxbuf[20];
955 size_t txsize, rxsize;
a4fc5ed6 956 int ret;
a4fc5ed6 957
d2d9cbbd
VS
958 txbuf[0] = (msg->request << 4) |
959 ((msg->address >> 16) & 0xf);
960 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
961 txbuf[2] = msg->address & 0xff;
962 txbuf[3] = msg->size - 1;
46a5ae9f 963
9d1a1031
JN
964 switch (msg->request & ~DP_AUX_I2C_MOT) {
965 case DP_AUX_NATIVE_WRITE:
966 case DP_AUX_I2C_WRITE:
a6c8aff0 967 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 968 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 969
9d1a1031
JN
970 if (WARN_ON(txsize > 20))
971 return -E2BIG;
a4fc5ed6 972
9d1a1031 973 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 978
a1ddefd8
JN
979 if (ret > 1) {
980 /* Number of bytes written in a short write. */
981 ret = clamp_t(int, rxbuf[1], 0, msg->size);
982 } else {
983 /* Return payload size. */
984 ret = msg->size;
985 }
9d1a1031
JN
986 }
987 break;
46a5ae9f 988
9d1a1031
JN
989 case DP_AUX_NATIVE_READ:
990 case DP_AUX_I2C_READ:
a6c8aff0 991 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 992 rxsize = msg->size + 1;
a4fc5ed6 993
9d1a1031
JN
994 if (WARN_ON(rxsize > 20))
995 return -E2BIG;
a4fc5ed6 996
9d1a1031
JN
997 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
998 if (ret > 0) {
999 msg->reply = rxbuf[0] >> 4;
1000 /*
1001 * Assume happy day, and copy the data. The caller is
1002 * expected to check msg->reply before touching it.
1003 *
1004 * Return payload size.
1005 */
1006 ret--;
1007 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1008 }
9d1a1031
JN
1009 break;
1010
1011 default:
1012 ret = -EINVAL;
1013 break;
a4fc5ed6 1014 }
f51a44b9 1015
9d1a1031 1016 return ret;
a4fc5ed6
KP
1017}
1018
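/*
 * Rough sketch of how the header above is consumed, with illustrative
 * numbers: a native DPCD read of 16 bytes builds a 4-byte header
 * (HEADER_SIZE), so txsize = 4 and rxsize = msg->size + 1 = 17; on success
 * rxbuf[0] carries the reply code in its high nibble and the remaining
 * bytes are the payload copied back into msg->buffer.
 */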
9d1a1031
JN
1019static void
1020intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1021{
1022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1023 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1024 enum port port = intel_dig_port->port;
0b99836f 1025 const char *name = NULL;
ab2c0672
DA
1026 int ret;
1027
33ad6626
JN
1028 switch (port) {
1029 case PORT_A:
1030 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1031 name = "DPDDC-A";
ab2c0672 1032 break;
33ad6626
JN
1033 case PORT_B:
1034 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1035 name = "DPDDC-B";
ab2c0672 1036 break;
33ad6626
JN
1037 case PORT_C:
1038 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1039 name = "DPDDC-C";
ab2c0672 1040 break;
33ad6626
JN
1041 case PORT_D:
1042 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1043 name = "DPDDC-D";
33ad6626
JN
1044 break;
1045 default:
1046 BUG();
ab2c0672
DA
1047 }
1048
1b1aad75
DL
1049 /*
1050 * The AUX_CTL register is usually DP_CTL + 0x10.
1051 *
1052 * On Haswell and Broadwell though:
1053 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1054 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1055 *
1056 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1057 */
1058 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1059 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1060
0b99836f 1061 intel_dp->aux.name = name;
9d1a1031
JN
1062 intel_dp->aux.dev = dev->dev;
1063 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1064
0b99836f
JN
1065 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1066 connector->base.kdev->kobj.name);
8316f337 1067
4f71d0cb 1068 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1069 if (ret < 0) {
4f71d0cb 1070 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1071 name, ret);
1072 return;
ab2c0672 1073 }
8a5e6aeb 1074
0b99836f
JN
1075 ret = sysfs_create_link(&connector->base.kdev->kobj,
1076 &intel_dp->aux.ddc.dev.kobj,
1077 intel_dp->aux.ddc.dev.kobj.name);
1078 if (ret < 0) {
1079 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1080 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1081 }
a4fc5ed6
KP
1082}
1083
80f65de3
ID
1084static void
1085intel_dp_connector_unregister(struct intel_connector *intel_connector)
1086{
1087 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1088
0e32b39c
DA
1089 if (!intel_connector->mst_port)
1090 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1091 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1092 intel_connector_unregister(intel_connector);
1093}
1094
5416d871 1095static void
c3346ef6 1096skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1097{
1098 u32 ctrl1;
1099
dd3cd74a
ACO
1100 memset(&pipe_config->dpll_hw_state, 0,
1101 sizeof(pipe_config->dpll_hw_state));
1102
5416d871
DL
1103 pipe_config->ddi_pll_sel = SKL_DPLL0;
1104 pipe_config->dpll_hw_state.cfgcr1 = 0;
1105 pipe_config->dpll_hw_state.cfgcr2 = 0;
1106
1107 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1108 switch (link_clock / 2) {
1109 case 81000:
71cd8423 1110 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1111 SKL_DPLL0);
1112 break;
c3346ef6 1113 case 135000:
71cd8423 1114 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1115 SKL_DPLL0);
1116 break;
c3346ef6 1117 case 270000:
71cd8423 1118 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1119 SKL_DPLL0);
1120 break;
c3346ef6 1121 case 162000:
71cd8423 1122 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1123 SKL_DPLL0);
1124 break;
1125 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1126 results in a CDCLK change. Need to handle the CDCLK change by
1127 disabling pipes and re-enabling them */
1128 case 108000:
71cd8423 1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1130 SKL_DPLL0);
1131 break;
1132 case 216000:
71cd8423 1133 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1134 SKL_DPLL0);
1135 break;
1136
5416d871
DL
1137 }
1138 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1139}
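/*
 * Note that the switch above is keyed on link_clock / 2, so e.g. a 5.4GHz
 * link (link_clock = 540000) hits the 270000 case and selects
 * DPLL_CTRL1_LINK_RATE_2700 on DPLL0, while 162000 hits the 81000 case.
 */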
1140
0e50338c 1141static void
5cec258b 1142hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1143{
1144 switch (link_bw) {
1145 case DP_LINK_BW_1_62:
1146 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1147 break;
1148 case DP_LINK_BW_2_7:
1149 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1150 break;
1151 case DP_LINK_BW_5_4:
1152 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1153 break;
1154 }
1155}
1156
fc0f8e25 1157static int
12f6a2e2 1158intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1159{
94ca719e
VS
1160 if (intel_dp->num_sink_rates) {
1161 *sink_rates = intel_dp->sink_rates;
1162 return intel_dp->num_sink_rates;
fc0f8e25 1163 }
12f6a2e2
VS
1164
1165 *sink_rates = default_rates;
1166
1167 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1168}
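/*
 * The shift above works because the standard DP link-bw codes are 0x06,
 * 0x0a and 0x14 (1.62, 2.7 and 5.4 GHz): (0x06 >> 3) + 1 = 1,
 * (0x0a >> 3) + 1 = 2 and (0x14 >> 3) + 1 = 3, i.e. how many entries of
 * default_rates the sink supports.
 */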
1169
a8f3ef61 1170static int
1db10e28 1171intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1172{
637a9c63
SJ
1173 if (IS_SKYLAKE(dev)) {
1174 *source_rates = skl_rates;
1175 return ARRAY_SIZE(skl_rates);
fe51bfb9
VS
1176 } else if (IS_CHERRYVIEW(dev)) {
1177 *source_rates = chv_rates;
1178 return ARRAY_SIZE(chv_rates);
a8f3ef61 1179 }
636280ba
VS
1180
1181 *source_rates = default_rates;
1182
1db10e28
VS
1183 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1184 /* WaDisableHBR2:skl */
1185 return (DP_LINK_BW_2_7 >> 3) + 1;
1186 else if (INTEL_INFO(dev)->gen >= 8 ||
1187 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1188 return (DP_LINK_BW_5_4 >> 3) + 1;
1189 else
1190 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1191}
1192
c6bb3538
DV
1193static void
1194intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1195 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1196{
1197 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1198 const struct dp_link_dpll *divisor = NULL;
1199 int i, count = 0;
c6bb3538
DV
1200
1201 if (IS_G4X(dev)) {
9dd4ffdf
CML
1202 divisor = gen4_dpll;
1203 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1204 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1205 divisor = pch_dpll;
1206 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1207 } else if (IS_CHERRYVIEW(dev)) {
1208 divisor = chv_dpll;
1209 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1210 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1211 divisor = vlv_dpll;
1212 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1213 }
9dd4ffdf
CML
1214
1215 if (divisor && count) {
1216 for (i = 0; i < count; i++) {
1217 if (link_bw == divisor[i].link_bw) {
1218 pipe_config->dpll = divisor[i].dpll;
1219 pipe_config->clock_set = true;
1220 break;
1221 }
1222 }
c6bb3538
DV
1223 }
1224}
1225
2ecae76a
VS
1226static int intersect_rates(const int *source_rates, int source_len,
1227 const int *sink_rates, int sink_len,
94ca719e 1228 int *common_rates)
a8f3ef61
SJ
1229{
1230 int i = 0, j = 0, k = 0;
1231
a8f3ef61
SJ
1232 while (i < source_len && j < sink_len) {
1233 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1234 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1235 return k;
94ca719e 1236 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1237 ++k;
1238 ++i;
1239 ++j;
1240 } else if (source_rates[i] < sink_rates[j]) {
1241 ++i;
1242 } else {
1243 ++j;
1244 }
1245 }
1246 return k;
1247}
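/*
 * Example: with source_rates = { 162000, 270000, 540000 } and
 * sink_rates = { 162000, 216000, 270000 }, the merge above yields
 * common_rates = { 162000, 270000 } and returns 2. Both inputs must be
 * sorted in ascending order for the two-pointer walk to work.
 */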
1248
94ca719e
VS
1249static int intel_dp_common_rates(struct intel_dp *intel_dp,
1250 int *common_rates)
2ecae76a
VS
1251{
1252 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1253 const int *source_rates, *sink_rates;
1254 int source_len, sink_len;
1255
1256 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1257 source_len = intel_dp_source_rates(dev, &source_rates);
1258
1259 return intersect_rates(source_rates, source_len,
1260 sink_rates, sink_len,
94ca719e 1261 common_rates);
2ecae76a
VS
1262}
1263
0336400e
VS
1264static void snprintf_int_array(char *str, size_t len,
1265 const int *array, int nelem)
1266{
1267 int i;
1268
1269 str[0] = '\0';
1270
1271 for (i = 0; i < nelem; i++) {
b2f505be 1272 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1273 if (r >= len)
1274 return;
1275 str += r;
1276 len -= r;
1277 }
1278}
1279
1280static void intel_dp_print_rates(struct intel_dp *intel_dp)
1281{
1282 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1283 const int *source_rates, *sink_rates;
94ca719e
VS
1284 int source_len, sink_len, common_len;
1285 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1286 char str[128]; /* FIXME: too big for stack? */
1287
1288 if ((drm_debug & DRM_UT_KMS) == 0)
1289 return;
1290
1291 source_len = intel_dp_source_rates(dev, &source_rates);
1292 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1293 DRM_DEBUG_KMS("source rates: %s\n", str);
1294
1295 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1296 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1297 DRM_DEBUG_KMS("sink rates: %s\n", str);
1298
94ca719e
VS
1299 common_len = intel_dp_common_rates(intel_dp, common_rates);
1300 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1301 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1302}
1303
f4896f15 1304static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1305{
1306 int i = 0;
1307
1308 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1309 if (find == rates[i])
1310 break;
1311
1312 return i;
1313}
1314
50fec21a
VS
1315int
1316intel_dp_max_link_rate(struct intel_dp *intel_dp)
1317{
1318 int rates[DP_MAX_SUPPORTED_RATES] = {};
1319 int len;
1320
94ca719e 1321 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1322 if (WARN_ON(len <= 0))
1323 return 162000;
1324
1325 return rates[rate_to_index(0, rates) - 1];
1326}
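/*
 * rate_to_index(0, rates) relies on the common_rates array being ascending
 * and zero-filled past the last valid entry: the index of the first zero is
 * the number of valid rates, so rates[index - 1] is the highest common
 * rate, e.g. { 162000, 270000, 0, ... } gives index 2 and 270000.
 */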
1327
ed4e9c1d
VS
1328int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1329{
94ca719e 1330 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1331}
1332
00c09d70 1333bool
5bfe2ac0 1334intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1335 struct intel_crtc_state *pipe_config)
a4fc5ed6 1336{
5bfe2ac0 1337 struct drm_device *dev = encoder->base.dev;
36008365 1338 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1339 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1340 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1341 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1342 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1343 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1344 int lane_count, clock;
56071a20 1345 int min_lane_count = 1;
eeb6324d 1346 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1347 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1348 int min_clock = 0;
a8f3ef61 1349 int max_clock;
083f9560 1350 int bpp, mode_rate;
ff9a6750 1351 int link_avail, link_clock;
94ca719e
VS
1352 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1353 int common_len;
a8f3ef61 1354
94ca719e 1355 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1356
1357 /* No common link rates between source and sink */
94ca719e 1358 WARN_ON(common_len <= 0);
a8f3ef61 1359
94ca719e 1360 max_clock = common_len - 1;
a4fc5ed6 1361
bc7d38a4 1362 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1363 pipe_config->has_pch_encoder = true;
1364
03afc4a2 1365 pipe_config->has_dp_encoder = true;
f769cd24 1366 pipe_config->has_drrs = false;
9fcb1704 1367 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1368
dd06f90e
JN
1369 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1370 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1371 adjusted_mode);
a1b2278e
CK
1372
1373 if (INTEL_INFO(dev)->gen >= 9) {
1374 int ret;
1375 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1376 if (ret)
1377 return ret;
1378 }
1379
2dd24552
JB
1380 if (!HAS_PCH_SPLIT(dev))
1381 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1382 intel_connector->panel.fitting_mode);
1383 else
b074cec8
JB
1384 intel_pch_panel_fitting(intel_crtc, pipe_config,
1385 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1386 }
1387
cb1793ce 1388 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1389 return false;
1390
083f9560 1391 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1392 "max bw %d pixel clock %iKHz\n",
94ca719e 1393 max_lane_count, common_rates[max_clock],
241bfc38 1394 adjusted_mode->crtc_clock);
083f9560 1395
36008365
DV
1396 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1397 * bpc in between. */
3e7ca985 1398 bpp = pipe_config->pipe_bpp;
56071a20
JN
1399 if (is_edp(intel_dp)) {
1400 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1401 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1402 dev_priv->vbt.edp_bpp);
1403 bpp = dev_priv->vbt.edp_bpp;
1404 }
1405
344c5bbc
JN
1406 /*
1407 * Use the maximum clock and number of lanes the eDP panel
1408 * advertises being capable of. The panels are generally
1409 * designed to support only a single clock and lane
1410 * configuration, and typically these values correspond to the
1411 * native resolution of the panel.
1412 */
1413 min_lane_count = max_lane_count;
1414 min_clock = max_clock;
7984211e 1415 }
657445fe 1416
36008365 1417 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1418 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1419 bpp);
36008365 1420
c6930992 1421 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1422 for (lane_count = min_lane_count;
1423 lane_count <= max_lane_count;
1424 lane_count <<= 1) {
1425
94ca719e 1426 link_clock = common_rates[clock];
36008365
DV
1427 link_avail = intel_dp_max_data_rate(link_clock,
1428 lane_count);
1429
1430 if (mode_rate <= link_avail) {
1431 goto found;
1432 }
1433 }
1434 }
1435 }
c4867936 1436
36008365 1437 return false;
3685a8f3 1438
36008365 1439found:
55bc60db
VS
1440 if (intel_dp->color_range_auto) {
1441 /*
1442 * See:
1443 * CEA-861-E - 5.1 Default Encoding Parameters
1444 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1445 */
18316c8c 1446 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1447 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1448 else
1449 intel_dp->color_range = 0;
1450 }
1451
3685a8f3 1452 if (intel_dp->color_range)
50f3b016 1453 pipe_config->limited_color_range = true;
a4fc5ed6 1454
36008365 1455 intel_dp->lane_count = lane_count;
a8f3ef61 1456
94ca719e 1457 if (intel_dp->num_sink_rates) {
bc27b7d3 1458 intel_dp->link_bw = 0;
a8f3ef61 1459 intel_dp->rate_select =
94ca719e 1460 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1461 } else {
1462 intel_dp->link_bw =
94ca719e 1463 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1464 intel_dp->rate_select = 0;
a8f3ef61
SJ
1465 }
1466
657445fe 1467 pipe_config->pipe_bpp = bpp;
94ca719e 1468 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1469
36008365
DV
1470 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1471 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1472 pipe_config->port_clock, bpp);
36008365
DV
1473 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1474 mode_rate, link_avail);
a4fc5ed6 1475
03afc4a2 1476 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1477 adjusted_mode->crtc_clock,
1478 pipe_config->port_clock,
03afc4a2 1479 &pipe_config->dp_m_n);
9d1a455b 1480
439d7ac0 1481 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1482 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1483 pipe_config->has_drrs = true;
439d7ac0
PB
1484 intel_link_compute_m_n(bpp, lane_count,
1485 intel_connector->panel.downclock_mode->clock,
1486 pipe_config->port_clock,
1487 &pipe_config->dp_m2_n2);
1488 }
1489
5416d871 1490 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1491 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
977bb38d
S
1492 else if (IS_BROXTON(dev))
1493 /* handled in ddi */;
5416d871 1494 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1495 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1496 else
1497 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1498
03afc4a2 1499 return true;
a4fc5ed6
KP
1500}
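/*
 * The search above walks bpp down from the pipe's value to 18 in steps of
 * 6 (two bits per channel), and for each bpp tries link clocks from
 * min_clock to max_clock and lane counts 1, 2, 4, taking the first
 * combination whose intel_dp_max_data_rate() covers the required mode rate;
 * for eDP both minima are pinned to the maxima, so effectively only bpp is
 * negotiated.
 */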
1501
7c62a164 1502static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1503{
7c62a164
DV
1504 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1505 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1506 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1507 struct drm_i915_private *dev_priv = dev->dev_private;
1508 u32 dpa_ctl;
1509
6e3c9717
ACO
1510 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1511 crtc->config->port_clock);
ea9b6006
DV
1512 dpa_ctl = I915_READ(DP_A);
1513 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1514
6e3c9717 1515 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1516 /* For a long time we've carried around an ILK-DevA w/a for the
1517 * 160MHz clock. If we're really unlucky, it's still required.
1518 */
1519 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1520 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1521 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1522 } else {
1523 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1524 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1525 }
1ce17038 1526
ea9b6006
DV
1527 I915_WRITE(DP_A, dpa_ctl);
1528
1529 POSTING_READ(DP_A);
1530 udelay(500);
1531}
1532
8ac33ed3 1533static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1534{
b934223d 1535 struct drm_device *dev = encoder->base.dev;
417e822d 1536 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1537 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1538 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1539 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1540 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1541
417e822d 1542 /*
1a2eb460 1543 * There are four kinds of DP registers:
417e822d
KP
1544 *
1545 * IBX PCH
1a2eb460
KP
1546 * SNB CPU
1547 * IVB CPU
417e822d
KP
1548 * CPT PCH
1549 *
1550 * IBX PCH and CPU are the same for almost everything,
1551 * except that the CPU DP PLL is configured in this
1552 * register
1553 *
1554 * CPT PCH is quite different, having many bits moved
1555 * to the TRANS_DP_CTL register instead. That
1556 * configuration happens (oddly) in ironlake_pch_enable
1557 */
9c9e7927 1558
417e822d
KP
1559 /* Preserve the BIOS-computed detected bit. This is
1560 * supposed to be read-only.
1561 */
1562 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1563
417e822d 1564 /* Handle DP bits in common between all three register formats */
417e822d 1565 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1566 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1567
6e3c9717 1568 if (crtc->config->has_audio)
ea5b213a 1569 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1570
417e822d 1571 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1572
39e5fa88 1573 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1574 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1575 intel_dp->DP |= DP_SYNC_HS_HIGH;
1576 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1577 intel_dp->DP |= DP_SYNC_VS_HIGH;
1578 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1579
6aba5b6c 1580 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1581 intel_dp->DP |= DP_ENHANCED_FRAMING;
1582
7c62a164 1583 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1584 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1585 u32 trans_dp;
1586
39e5fa88 1587 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1588
1589 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1590 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1591 trans_dp |= TRANS_DP_ENH_FRAMING;
1592 else
1593 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1594 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1595 } else {
b2634017 1596 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1597 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1598
1599 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1600 intel_dp->DP |= DP_SYNC_HS_HIGH;
1601 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1602 intel_dp->DP |= DP_SYNC_VS_HIGH;
1603 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1604
6aba5b6c 1605 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1606 intel_dp->DP |= DP_ENHANCED_FRAMING;
1607
39e5fa88 1608 if (IS_CHERRYVIEW(dev))
44f37d1f 1609 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1610 else if (crtc->pipe == PIPE_B)
1611 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1612 }
a4fc5ed6
KP
1613}
1614
ffd6749d
PZ
1615#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1616#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1617
1a5ef5b7
PZ
1618#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1619#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1620
ffd6749d
PZ
1621#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1622#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1623
4be73780 1624static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1625 u32 mask,
1626 u32 value)
bd943159 1627{
30add22d 1628 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1629 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1630 u32 pp_stat_reg, pp_ctrl_reg;
1631
e39b999a
VS
1632 lockdep_assert_held(&dev_priv->pps_mutex);
1633
bf13e81b
JN
1634 pp_stat_reg = _pp_stat_reg(intel_dp);
1635 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1636
99ea7127 1637 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1638 mask, value,
1639 I915_READ(pp_stat_reg),
1640 I915_READ(pp_ctrl_reg));
32ce697c 1641
453c5420 1642 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1643 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1644 I915_READ(pp_stat_reg),
1645 I915_READ(pp_ctrl_reg));
32ce697c 1646 }
54c136d4
CW
1647
1648 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1649}
32ce697c 1650
4be73780 1651static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1652{
1653 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1654 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1655}
1656
4be73780 1657static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1658{
1659 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1660 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1661}
1662
4be73780 1663static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1664{
1665 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1666
 1667	/* When we disable the VDD override bit last, we have to do the
 1668	 * power cycle wait manually. */
1669 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1670 intel_dp->panel_power_cycle_delay);
1671
4be73780 1672 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1673}
1674
4be73780 1675static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1676{
1677 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1678 intel_dp->backlight_on_delay);
1679}
1680
4be73780 1681static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1682{
1683 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1684 intel_dp->backlight_off_delay);
1685}
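/*
 * Worked example (illustrative numbers only): if the backlight was
 * switched off at jiffies timestamp T and backlight_off_delay is 200 ms,
 * a panel-off request arriving at T + 50 ms sleeps for the remaining
 * ~150 ms in edp_wait_backlight_off() rather than always paying the
 * full delay.
 */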
99ea7127 1686
832dd3c1
KP
1687/* Read the current pp_control value, unlocking the register if it
1688 * is locked
1689 */
1690
453c5420 1691static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1692{
453c5420
JB
1693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1694 struct drm_i915_private *dev_priv = dev->dev_private;
1695 u32 control;
832dd3c1 1696
e39b999a
VS
1697 lockdep_assert_held(&dev_priv->pps_mutex);
1698
bf13e81b 1699 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1700 control &= ~PANEL_UNLOCK_MASK;
1701 control |= PANEL_UNLOCK_REGS;
1702 return control;
bd943159
KP
1703}
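/*
 * Illustrative usage of the helper above (this is the read-modify-write
 * pattern the panel power and backlight code in this file follows, not
 * a new interface):
 *
 *	pp = ironlake_get_pp_control(intel_dp);
 *	pp |= EDP_BLC_ENABLE;
 *	I915_WRITE(_pp_ctrl_reg(intel_dp), pp);
 *	POSTING_READ(_pp_ctrl_reg(intel_dp));
 */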
1704
951468f3
VS
1705/*
1706 * Must be paired with edp_panel_vdd_off().
1707 * Must hold pps_mutex around the whole on/off sequence.
1708 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1709 */
1e0560e0 1710static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1711{
30add22d 1712 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1713 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1714 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1715 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1716 enum intel_display_power_domain power_domain;
5d613501 1717 u32 pp;
453c5420 1718 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1719 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1720
e39b999a
VS
1721 lockdep_assert_held(&dev_priv->pps_mutex);
1722
97af61f5 1723 if (!is_edp(intel_dp))
adddaaf4 1724 return false;
bd943159 1725
2c623c11 1726 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1727 intel_dp->want_panel_vdd = true;
99ea7127 1728
4be73780 1729 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1730 return need_to_disable;
b0665d57 1731
4e6e1a54
ID
1732 power_domain = intel_display_port_power_domain(intel_encoder);
1733 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1734
3936fcf4
VS
1735 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1736 port_name(intel_dig_port->port));
bd943159 1737
4be73780
DV
1738 if (!edp_have_panel_power(intel_dp))
1739 wait_panel_power_cycle(intel_dp);
99ea7127 1740
453c5420 1741 pp = ironlake_get_pp_control(intel_dp);
5d613501 1742 pp |= EDP_FORCE_VDD;
ebf33b18 1743
bf13e81b
JN
1744 pp_stat_reg = _pp_stat_reg(intel_dp);
1745 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1746
1747 I915_WRITE(pp_ctrl_reg, pp);
1748 POSTING_READ(pp_ctrl_reg);
1749 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1750 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1751 /*
1752 * If the panel wasn't on, delay before accessing aux channel
1753 */
4be73780 1754 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1755 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1756 port_name(intel_dig_port->port));
f01eca2e 1757 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1758 }
adddaaf4
JN
1759
1760 return need_to_disable;
1761}
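/*
 * Illustrative calling pattern for the low-level VDD helpers (a sketch
 * only; it mirrors what intel_enable_dp() does later in this file):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */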
1762
951468f3
VS
1763/*
1764 * Must be paired with intel_edp_panel_vdd_off() or
1765 * intel_edp_panel_off().
1766 * Nested calls to these functions are not allowed since
1767 * we drop the lock. Caller must use some higher level
1768 * locking to prevent nested calls from other threads.
1769 */
b80d6c78 1770void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1771{
c695b6b6 1772 bool vdd;
adddaaf4 1773
c695b6b6
VS
1774 if (!is_edp(intel_dp))
1775 return;
1776
773538e8 1777 pps_lock(intel_dp);
c695b6b6 1778 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1779 pps_unlock(intel_dp);
c695b6b6 1780
e2c719b7 1781 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1782 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1783}
1784
4be73780 1785static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1786{
30add22d 1787 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1788 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1789 struct intel_digital_port *intel_dig_port =
1790 dp_to_dig_port(intel_dp);
1791 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1792 enum intel_display_power_domain power_domain;
5d613501 1793 u32 pp;
453c5420 1794 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1795
e39b999a 1796 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1797
15e899a0 1798 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1799
15e899a0 1800 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1801 return;
b0665d57 1802
3936fcf4
VS
1803 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1804 port_name(intel_dig_port->port));
bd943159 1805
be2c9196
VS
1806 pp = ironlake_get_pp_control(intel_dp);
1807 pp &= ~EDP_FORCE_VDD;
453c5420 1808
be2c9196
VS
1809 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1810 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1811
be2c9196
VS
1812 I915_WRITE(pp_ctrl_reg, pp);
1813 POSTING_READ(pp_ctrl_reg);
90791a5c 1814
be2c9196
VS
1815 /* Make sure sequencer is idle before allowing subsequent activity */
1816 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1817 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1818
be2c9196
VS
1819 if ((pp & POWER_TARGET_ON) == 0)
1820 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1821
be2c9196
VS
1822 power_domain = intel_display_port_power_domain(intel_encoder);
1823 intel_display_power_put(dev_priv, power_domain);
bd943159 1824}
5d613501 1825
4be73780 1826static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1827{
1828 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1829 struct intel_dp, panel_vdd_work);
bd943159 1830
773538e8 1831 pps_lock(intel_dp);
15e899a0
VS
1832 if (!intel_dp->want_panel_vdd)
1833 edp_panel_vdd_off_sync(intel_dp);
773538e8 1834 pps_unlock(intel_dp);
bd943159
KP
1835}
1836
aba86890
ID
1837static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1838{
1839 unsigned long delay;
1840
1841 /*
1842 * Queue the timer to fire a long time from now (relative to the power
1843 * down delay) to keep the panel power up across a sequence of
1844 * operations.
1845 */
1846 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1847 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1848}
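/*
 * Worked example (illustrative value): with a panel_power_cycle_delay of
 * 500 ms the delayed work is queued 500 * 5 = 2500 ms out, so a burst of
 * AUX transactions keeps VDD (and hence panel power) up instead of
 * power cycling the panel on every access.
 */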
1849
951468f3
VS
1850/*
1851 * Must be paired with edp_panel_vdd_on().
1852 * Must hold pps_mutex around the whole on/off sequence.
1853 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1854 */
4be73780 1855static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1856{
e39b999a
VS
1857 struct drm_i915_private *dev_priv =
1858 intel_dp_to_dev(intel_dp)->dev_private;
1859
1860 lockdep_assert_held(&dev_priv->pps_mutex);
1861
97af61f5
KP
1862 if (!is_edp(intel_dp))
1863 return;
5d613501 1864
e2c719b7 1865 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1866 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1867
bd943159
KP
1868 intel_dp->want_panel_vdd = false;
1869
aba86890 1870 if (sync)
4be73780 1871 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1872 else
1873 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1874}
1875
9f0fb5be 1876static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1877{
30add22d 1878 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1879 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1880 u32 pp;
453c5420 1881 u32 pp_ctrl_reg;
9934c132 1882
9f0fb5be
VS
1883 lockdep_assert_held(&dev_priv->pps_mutex);
1884
97af61f5 1885 if (!is_edp(intel_dp))
bd943159 1886 return;
99ea7127 1887
3936fcf4
VS
1888 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1889 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1890
e7a89ace
VS
1891 if (WARN(edp_have_panel_power(intel_dp),
1892 "eDP port %c panel power already on\n",
1893 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1894 return;
9934c132 1895
4be73780 1896 wait_panel_power_cycle(intel_dp);
37c6c9b0 1897
bf13e81b 1898 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1899 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1900 if (IS_GEN5(dev)) {
1901 /* ILK workaround: disable reset around power sequence */
1902 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1903 I915_WRITE(pp_ctrl_reg, pp);
1904 POSTING_READ(pp_ctrl_reg);
05ce1a49 1905 }
37c6c9b0 1906
1c0ae80a 1907 pp |= POWER_TARGET_ON;
99ea7127
KP
1908 if (!IS_GEN5(dev))
1909 pp |= PANEL_POWER_RESET;
1910
453c5420
JB
1911 I915_WRITE(pp_ctrl_reg, pp);
1912 POSTING_READ(pp_ctrl_reg);
9934c132 1913
4be73780 1914 wait_panel_on(intel_dp);
dce56b3c 1915 intel_dp->last_power_on = jiffies;
9934c132 1916
05ce1a49
KP
1917 if (IS_GEN5(dev)) {
1918 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1919 I915_WRITE(pp_ctrl_reg, pp);
1920 POSTING_READ(pp_ctrl_reg);
05ce1a49 1921 }
9f0fb5be 1922}
e39b999a 1923
9f0fb5be
VS
1924void intel_edp_panel_on(struct intel_dp *intel_dp)
1925{
1926 if (!is_edp(intel_dp))
1927 return;
1928
1929 pps_lock(intel_dp);
1930 edp_panel_on(intel_dp);
773538e8 1931 pps_unlock(intel_dp);
9934c132
JB
1932}
1933
9f0fb5be
VS
1934
1935static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1936{
4e6e1a54
ID
1937 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1938 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1939 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1940 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1941 enum intel_display_power_domain power_domain;
99ea7127 1942 u32 pp;
453c5420 1943 u32 pp_ctrl_reg;
9934c132 1944
9f0fb5be
VS
1945 lockdep_assert_held(&dev_priv->pps_mutex);
1946
97af61f5
KP
1947 if (!is_edp(intel_dp))
1948 return;
37c6c9b0 1949
3936fcf4
VS
1950 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1951 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1952
3936fcf4
VS
1953 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1954 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1955
453c5420 1956 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1957 /* We need to switch off panel power _and_ force vdd, for otherwise some
1958 * panels get very unhappy and cease to work. */
b3064154
PJ
1959 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1960 EDP_BLC_ENABLE);
453c5420 1961
bf13e81b 1962 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1963
849e39f5
PZ
1964 intel_dp->want_panel_vdd = false;
1965
453c5420
JB
1966 I915_WRITE(pp_ctrl_reg, pp);
1967 POSTING_READ(pp_ctrl_reg);
9934c132 1968
dce56b3c 1969 intel_dp->last_power_cycle = jiffies;
4be73780 1970 wait_panel_off(intel_dp);
849e39f5
PZ
1971
1972 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1973 power_domain = intel_display_port_power_domain(intel_encoder);
1974 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1975}
e39b999a 1976
9f0fb5be
VS
1977void intel_edp_panel_off(struct intel_dp *intel_dp)
1978{
1979 if (!is_edp(intel_dp))
1980 return;
e39b999a 1981
9f0fb5be
VS
1982 pps_lock(intel_dp);
1983 edp_panel_off(intel_dp);
773538e8 1984 pps_unlock(intel_dp);
9934c132
JB
1985}
1986
1250d107
JN
1987/* Enable backlight in the panel power control. */
1988static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1989{
da63a9f2
PZ
1990 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1991 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1992 struct drm_i915_private *dev_priv = dev->dev_private;
1993 u32 pp;
453c5420 1994 u32 pp_ctrl_reg;
32f9d658 1995
01cb9ea6
JB
1996 /*
1997 * If we enable the backlight right away following a panel power
1998 * on, we may see slight flicker as the panel syncs with the eDP
1999 * link. So delay a bit to make sure the image is solid before
2000 * allowing it to appear.
2001 */
4be73780 2002 wait_backlight_on(intel_dp);
e39b999a 2003
773538e8 2004 pps_lock(intel_dp);
e39b999a 2005
453c5420 2006 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2007 pp |= EDP_BLC_ENABLE;
453c5420 2008
bf13e81b 2009 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2010
2011 I915_WRITE(pp_ctrl_reg, pp);
2012 POSTING_READ(pp_ctrl_reg);
e39b999a 2013
773538e8 2014 pps_unlock(intel_dp);
32f9d658
ZW
2015}
2016
1250d107
JN
2017/* Enable backlight PWM and backlight PP control. */
2018void intel_edp_backlight_on(struct intel_dp *intel_dp)
2019{
2020 if (!is_edp(intel_dp))
2021 return;
2022
2023 DRM_DEBUG_KMS("\n");
2024
2025 intel_panel_enable_backlight(intel_dp->attached_connector);
2026 _intel_edp_backlight_on(intel_dp);
2027}
2028
2029/* Disable backlight in the panel power control. */
2030static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2031{
30add22d 2032 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2033 struct drm_i915_private *dev_priv = dev->dev_private;
2034 u32 pp;
453c5420 2035 u32 pp_ctrl_reg;
32f9d658 2036
f01eca2e
KP
2037 if (!is_edp(intel_dp))
2038 return;
2039
773538e8 2040 pps_lock(intel_dp);
e39b999a 2041
453c5420 2042 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2043 pp &= ~EDP_BLC_ENABLE;
453c5420 2044
bf13e81b 2045 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2046
2047 I915_WRITE(pp_ctrl_reg, pp);
2048 POSTING_READ(pp_ctrl_reg);
f7d2323c 2049
773538e8 2050 pps_unlock(intel_dp);
e39b999a
VS
2051
2052 intel_dp->last_backlight_off = jiffies;
f7d2323c 2053 edp_wait_backlight_off(intel_dp);
1250d107 2054}
f7d2323c 2055
1250d107
JN
2056/* Disable backlight PP control and backlight PWM. */
2057void intel_edp_backlight_off(struct intel_dp *intel_dp)
2058{
2059 if (!is_edp(intel_dp))
2060 return;
2061
2062 DRM_DEBUG_KMS("\n");
f7d2323c 2063
1250d107 2064 _intel_edp_backlight_off(intel_dp);
f7d2323c 2065 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2066}
a4fc5ed6 2067
73580fb7
JN
2068/*
2069 * Hook for controlling the panel power control backlight through the bl_power
2070 * sysfs attribute. Take care to handle multiple calls.
2071 */
2072static void intel_edp_backlight_power(struct intel_connector *connector,
2073 bool enable)
2074{
2075 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2076 bool is_enabled;
2077
773538e8 2078 pps_lock(intel_dp);
e39b999a 2079 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2080 pps_unlock(intel_dp);
73580fb7
JN
2081
2082 if (is_enabled == enable)
2083 return;
2084
23ba9373
JN
2085 DRM_DEBUG_KMS("panel power control backlight %s\n",
2086 enable ? "enable" : "disable");
73580fb7
JN
2087
2088 if (enable)
2089 _intel_edp_backlight_on(intel_dp);
2090 else
2091 _intel_edp_backlight_off(intel_dp);
2092}
2093
2bd2ad64 2094static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2095{
da63a9f2
PZ
2096 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2097 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2098 struct drm_device *dev = crtc->dev;
d240f20f
JB
2099 struct drm_i915_private *dev_priv = dev->dev_private;
2100 u32 dpa_ctl;
2101
2bd2ad64
DV
2102 assert_pipe_disabled(dev_priv,
2103 to_intel_crtc(crtc)->pipe);
2104
d240f20f
JB
2105 DRM_DEBUG_KMS("\n");
2106 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2107 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2108 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2109
2110 /* We don't adjust intel_dp->DP while tearing down the link, to
2111 * facilitate link retraining (e.g. after hotplug). Hence clear all
2112 * enable bits here to ensure that we don't enable too much. */
2113 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2114 intel_dp->DP |= DP_PLL_ENABLE;
2115 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2116 POSTING_READ(DP_A);
2117 udelay(200);
d240f20f
JB
2118}
2119
2bd2ad64 2120static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2121{
da63a9f2
PZ
2122 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2123 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2124 struct drm_device *dev = crtc->dev;
d240f20f
JB
2125 struct drm_i915_private *dev_priv = dev->dev_private;
2126 u32 dpa_ctl;
2127
2bd2ad64
DV
2128 assert_pipe_disabled(dev_priv,
2129 to_intel_crtc(crtc)->pipe);
2130
d240f20f 2131 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2132 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2133 "dp pll off, should be on\n");
2134 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2135
2136 /* We can't rely on the value tracked for the DP register in
2137 * intel_dp->DP because link_down must not change that (otherwise link
 2138	 * re-training will fail). */
298b0b39 2139 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2140 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2141 POSTING_READ(DP_A);
d240f20f
JB
2142 udelay(200);
2143}
2144
c7ad3810 2145/* If the sink supports it, try to set the power state appropriately */
c19b0669 2146void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2147{
2148 int ret, i;
2149
2150 /* Should have a valid DPCD by this point */
2151 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2152 return;
2153
2154 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2155 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2156 DP_SET_POWER_D3);
c7ad3810
JB
2157 } else {
2158 /*
2159 * When turning on, we need to retry for 1ms to give the sink
2160 * time to wake up.
2161 */
2162 for (i = 0; i < 3; i++) {
9d1a1031
JN
2163 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2164 DP_SET_POWER_D0);
c7ad3810
JB
2165 if (ret == 1)
2166 break;
2167 msleep(1);
2168 }
2169 }
f9cac721
JN
2170
2171 if (ret != 1)
2172 DRM_DEBUG_KMS("failed to %s sink power state\n",
2173 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2174}
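/*
 * Illustrative call sites (taken from this file): the sink is put into
 * D3 before the port is torn down and back into D0 right before link
 * training:
 *
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);	(intel_disable_dp)
 *	...
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);		(intel_enable_dp)
 *	intel_dp_start_link_train(intel_dp);
 */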
2175
19d8fe15
DV
2176static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2177 enum pipe *pipe)
d240f20f 2178{
19d8fe15 2179 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2180 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2181 struct drm_device *dev = encoder->base.dev;
2182 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2183 enum intel_display_power_domain power_domain;
2184 u32 tmp;
2185
2186 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2187 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2188 return false;
2189
2190 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2191
2192 if (!(tmp & DP_PORT_EN))
2193 return false;
2194
39e5fa88 2195 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2196 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2197 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2198 enum pipe p;
19d8fe15 2199
adc289d7
VS
2200 for_each_pipe(dev_priv, p) {
2201 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2202 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2203 *pipe = p;
19d8fe15
DV
2204 return true;
2205 }
2206 }
19d8fe15 2207
4a0833ec
DV
2208 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2209 intel_dp->output_reg);
39e5fa88
VS
2210 } else if (IS_CHERRYVIEW(dev)) {
2211 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2212 } else {
2213 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2214 }
d240f20f 2215
19d8fe15
DV
2216 return true;
2217}
d240f20f 2218
045ac3b5 2219static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2220 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2221{
2222 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2223 u32 tmp, flags = 0;
63000ef6
XZ
2224 struct drm_device *dev = encoder->base.dev;
2225 struct drm_i915_private *dev_priv = dev->dev_private;
2226 enum port port = dp_to_dig_port(intel_dp)->port;
2227 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2228 int dotclock;
045ac3b5 2229
9ed109a7 2230 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2231
2232 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2233
39e5fa88
VS
2234 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2235 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2236 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2237 flags |= DRM_MODE_FLAG_PHSYNC;
2238 else
2239 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2240
39e5fa88 2241 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2242 flags |= DRM_MODE_FLAG_PVSYNC;
2243 else
2244 flags |= DRM_MODE_FLAG_NVSYNC;
2245 } else {
39e5fa88 2246 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2247 flags |= DRM_MODE_FLAG_PHSYNC;
2248 else
2249 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2250
39e5fa88 2251 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2252 flags |= DRM_MODE_FLAG_PVSYNC;
2253 else
2254 flags |= DRM_MODE_FLAG_NVSYNC;
2255 }
045ac3b5 2256
2d112de7 2257 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2258
8c875fca
VS
2259 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2260 tmp & DP_COLOR_RANGE_16_235)
2261 pipe_config->limited_color_range = true;
2262
eb14cb74
VS
2263 pipe_config->has_dp_encoder = true;
2264
2265 intel_dp_get_m_n(crtc, pipe_config);
2266
18442d08 2267 if (port == PORT_A) {
f1f644dc
JB
2268 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2269 pipe_config->port_clock = 162000;
2270 else
2271 pipe_config->port_clock = 270000;
2272 }
18442d08
VS
2273
2274 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2275 &pipe_config->dp_m_n);
2276
2277 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2278 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2279
2d112de7 2280 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2281
c6cd2ee2
JN
2282 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2283 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2284 /*
2285 * This is a big fat ugly hack.
2286 *
2287 * Some machines in UEFI boot mode provide us a VBT that has 18
2288 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2289 * unknown we fail to light up. Yet the same BIOS boots up with
2290 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2291 * max, not what it tells us to use.
2292 *
2293 * Note: This will still be broken if the eDP panel is not lit
2294 * up by the BIOS, and thus we can't get the mode at module
2295 * load.
2296 */
2297 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2298 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2299 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2300 }
045ac3b5
JB
2301}
2302
e8cb4558 2303static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2304{
e8cb4558 2305 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2306 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2307 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2308
6e3c9717 2309 if (crtc->config->has_audio)
495a5bb8 2310 intel_audio_codec_disable(encoder);
6cb49835 2311
b32c6f48
RV
2312 if (HAS_PSR(dev) && !HAS_DDI(dev))
2313 intel_psr_disable(intel_dp);
2314
6cb49835
DV
2315 /* Make sure the panel is off before trying to change the mode. But also
2316 * ensure that we have vdd while we switch off the panel. */
24f3e092 2317 intel_edp_panel_vdd_on(intel_dp);
4be73780 2318 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2319 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2320 intel_edp_panel_off(intel_dp);
3739850b 2321
08aff3fe
VS
2322 /* disable the port before the pipe on g4x */
2323 if (INTEL_INFO(dev)->gen < 5)
3739850b 2324 intel_dp_link_down(intel_dp);
d240f20f
JB
2325}
2326
08aff3fe 2327static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2328{
2bd2ad64 2329 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2330 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2331
49277c31 2332 intel_dp_link_down(intel_dp);
08aff3fe
VS
2333 if (port == PORT_A)
2334 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2335}
2336
2337static void vlv_post_disable_dp(struct intel_encoder *encoder)
2338{
2339 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2340
2341 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2342}
2343
580d3811
VS
2344static void chv_post_disable_dp(struct intel_encoder *encoder)
2345{
2346 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2347 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2348 struct drm_device *dev = encoder->base.dev;
2349 struct drm_i915_private *dev_priv = dev->dev_private;
2350 struct intel_crtc *intel_crtc =
2351 to_intel_crtc(encoder->base.crtc);
2352 enum dpio_channel ch = vlv_dport_to_channel(dport);
2353 enum pipe pipe = intel_crtc->pipe;
2354 u32 val;
2355
2356 intel_dp_link_down(intel_dp);
2357
a580516d 2358 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2359
2360 /* Propagate soft reset to data lane reset */
97fd4d5c 2361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2362 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2363 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2364
97fd4d5c
VS
2365 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2366 val |= CHV_PCS_REQ_SOFTRESET_EN;
2367 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2368
2369 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2370 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2371 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2372
2373 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2374 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2375 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811 2376
a580516d 2377 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2378}
2379
7b13b58a
VS
2380static void
2381_intel_dp_set_link_train(struct intel_dp *intel_dp,
2382 uint32_t *DP,
2383 uint8_t dp_train_pat)
2384{
2385 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2386 struct drm_device *dev = intel_dig_port->base.base.dev;
2387 struct drm_i915_private *dev_priv = dev->dev_private;
2388 enum port port = intel_dig_port->port;
2389
2390 if (HAS_DDI(dev)) {
2391 uint32_t temp = I915_READ(DP_TP_CTL(port));
2392
2393 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2394 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2395 else
2396 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2397
2398 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2399 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2400 case DP_TRAINING_PATTERN_DISABLE:
2401 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2402
2403 break;
2404 case DP_TRAINING_PATTERN_1:
2405 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2406 break;
2407 case DP_TRAINING_PATTERN_2:
2408 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2409 break;
2410 case DP_TRAINING_PATTERN_3:
2411 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2412 break;
2413 }
2414 I915_WRITE(DP_TP_CTL(port), temp);
2415
39e5fa88
VS
2416 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2417 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2418 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2419
2420 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2421 case DP_TRAINING_PATTERN_DISABLE:
2422 *DP |= DP_LINK_TRAIN_OFF_CPT;
2423 break;
2424 case DP_TRAINING_PATTERN_1:
2425 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2426 break;
2427 case DP_TRAINING_PATTERN_2:
2428 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2429 break;
2430 case DP_TRAINING_PATTERN_3:
2431 DRM_ERROR("DP training pattern 3 not supported\n");
2432 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2433 break;
2434 }
2435
2436 } else {
2437 if (IS_CHERRYVIEW(dev))
2438 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2439 else
2440 *DP &= ~DP_LINK_TRAIN_MASK;
2441
2442 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2443 case DP_TRAINING_PATTERN_DISABLE:
2444 *DP |= DP_LINK_TRAIN_OFF;
2445 break;
2446 case DP_TRAINING_PATTERN_1:
2447 *DP |= DP_LINK_TRAIN_PAT_1;
2448 break;
2449 case DP_TRAINING_PATTERN_2:
2450 *DP |= DP_LINK_TRAIN_PAT_2;
2451 break;
2452 case DP_TRAINING_PATTERN_3:
2453 if (IS_CHERRYVIEW(dev)) {
2454 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2455 } else {
2456 DRM_ERROR("DP training pattern 3 not supported\n");
2457 *DP |= DP_LINK_TRAIN_PAT_2;
2458 }
2459 break;
2460 }
2461 }
2462}
2463
2464static void intel_dp_enable_port(struct intel_dp *intel_dp)
2465{
2466 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2467 struct drm_i915_private *dev_priv = dev->dev_private;
2468
7b13b58a
VS
2469 /* enable with pattern 1 (as per spec) */
2470 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2471 DP_TRAINING_PATTERN_1);
2472
2473 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2474 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2475
2476 /*
2477 * Magic for VLV/CHV. We _must_ first set up the register
2478 * without actually enabling the port, and then do another
2479 * write to enable the port. Otherwise link training will
2480 * fail when the power sequencer is freshly used for this port.
2481 */
2482 intel_dp->DP |= DP_PORT_EN;
2483
2484 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2485 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2486}
2487
e8cb4558 2488static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2489{
e8cb4558
DV
2490 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2491 struct drm_device *dev = encoder->base.dev;
2492 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2493 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2494 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2495 unsigned int lane_mask = 0x0;
5d613501 2496
0c33d8d7
DV
2497 if (WARN_ON(dp_reg & DP_PORT_EN))
2498 return;
5d613501 2499
093e3f13
VS
2500 pps_lock(intel_dp);
2501
2502 if (IS_VALLEYVIEW(dev))
2503 vlv_init_panel_power_sequencer(intel_dp);
2504
7b13b58a 2505 intel_dp_enable_port(intel_dp);
093e3f13
VS
2506
2507 edp_panel_vdd_on(intel_dp);
2508 edp_panel_on(intel_dp);
2509 edp_panel_vdd_off(intel_dp, true);
2510
2511 pps_unlock(intel_dp);
2512
61234fa5 2513 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2514 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2515 lane_mask);
61234fa5 2516
f01eca2e 2517 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2518 intel_dp_start_link_train(intel_dp);
33a34e4e 2519 intel_dp_complete_link_train(intel_dp);
3ab9c637 2520 intel_dp_stop_link_train(intel_dp);
c1dec79a 2521
6e3c9717 2522 if (crtc->config->has_audio) {
c1dec79a
JN
2523 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2524 pipe_name(crtc->pipe));
2525 intel_audio_codec_enable(encoder);
2526 }
ab1f90f9 2527}
89b667f8 2528
ecff4f3b
JN
2529static void g4x_enable_dp(struct intel_encoder *encoder)
2530{
828f5c6e
JN
2531 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2532
ecff4f3b 2533 intel_enable_dp(encoder);
4be73780 2534 intel_edp_backlight_on(intel_dp);
ab1f90f9 2535}
89b667f8 2536
ab1f90f9
JN
2537static void vlv_enable_dp(struct intel_encoder *encoder)
2538{
828f5c6e
JN
2539 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2540
4be73780 2541 intel_edp_backlight_on(intel_dp);
b32c6f48 2542 intel_psr_enable(intel_dp);
d240f20f
JB
2543}
2544
ecff4f3b 2545static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2546{
2547 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2548 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2549
8ac33ed3
DV
2550 intel_dp_prepare(encoder);
2551
d41f1efb
DV
2552 /* Only ilk+ has port A */
2553 if (dport->port == PORT_A) {
2554 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2555 ironlake_edp_pll_on(intel_dp);
d41f1efb 2556 }
ab1f90f9
JN
2557}
2558
83b84597
VS
2559static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2560{
2561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2562 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2563 enum pipe pipe = intel_dp->pps_pipe;
2564 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2565
2566 edp_panel_vdd_off_sync(intel_dp);
2567
2568 /*
 2569	 * VLV seems to get confused when multiple power sequencers
 2570	 * have the same port selected (even if only one has power/vdd
 2571	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2572	 * CHV on the other hand doesn't seem to mind having the same port
 2573	 * selected in multiple power sequencers, but let's clear the
2574 * port select always when logically disconnecting a power sequencer
2575 * from a port.
2576 */
2577 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2578 pipe_name(pipe), port_name(intel_dig_port->port));
2579 I915_WRITE(pp_on_reg, 0);
2580 POSTING_READ(pp_on_reg);
2581
2582 intel_dp->pps_pipe = INVALID_PIPE;
2583}
2584
a4a5d2f8
VS
2585static void vlv_steal_power_sequencer(struct drm_device *dev,
2586 enum pipe pipe)
2587{
2588 struct drm_i915_private *dev_priv = dev->dev_private;
2589 struct intel_encoder *encoder;
2590
2591 lockdep_assert_held(&dev_priv->pps_mutex);
2592
ac3c12e4
VS
2593 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2594 return;
2595
a4a5d2f8
VS
2596 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2597 base.head) {
2598 struct intel_dp *intel_dp;
773538e8 2599 enum port port;
a4a5d2f8
VS
2600
2601 if (encoder->type != INTEL_OUTPUT_EDP)
2602 continue;
2603
2604 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2605 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2606
2607 if (intel_dp->pps_pipe != pipe)
2608 continue;
2609
2610 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2611 pipe_name(pipe), port_name(port));
a4a5d2f8 2612
034e43c6
VS
2613 WARN(encoder->connectors_active,
2614 "stealing pipe %c power sequencer from active eDP port %c\n",
2615 pipe_name(pipe), port_name(port));
a4a5d2f8 2616
a4a5d2f8 2617 /* make sure vdd is off before we steal it */
83b84597 2618 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2619 }
2620}
2621
2622static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2623{
2624 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2625 struct intel_encoder *encoder = &intel_dig_port->base;
2626 struct drm_device *dev = encoder->base.dev;
2627 struct drm_i915_private *dev_priv = dev->dev_private;
2628 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2629
2630 lockdep_assert_held(&dev_priv->pps_mutex);
2631
093e3f13
VS
2632 if (!is_edp(intel_dp))
2633 return;
2634
a4a5d2f8
VS
2635 if (intel_dp->pps_pipe == crtc->pipe)
2636 return;
2637
2638 /*
2639 * If another power sequencer was being used on this
 2640	 * port previously, make sure to turn off vdd there while
2641 * we still have control of it.
2642 */
2643 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2644 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2645
2646 /*
2647 * We may be stealing the power
2648 * sequencer from another port.
2649 */
2650 vlv_steal_power_sequencer(dev, crtc->pipe);
2651
2652 /* now it's all ours */
2653 intel_dp->pps_pipe = crtc->pipe;
2654
2655 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2656 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2657
2658 /* init power sequencer on this pipe and port */
36b5f425
VS
2659 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2660 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2661}
2662
ab1f90f9 2663static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2664{
2bd2ad64 2665 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2666 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2667 struct drm_device *dev = encoder->base.dev;
89b667f8 2668 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2669 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2670 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2671 int pipe = intel_crtc->pipe;
2672 u32 val;
a4fc5ed6 2673
a580516d 2674 mutex_lock(&dev_priv->sb_lock);
89b667f8 2675
ab3c759a 2676 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2677 val = 0;
2678 if (pipe)
2679 val |= (1<<21);
2680 else
2681 val &= ~(1<<21);
2682 val |= 0x001000c4;
ab3c759a
CML
2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2684 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2685 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2686
a580516d 2687 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2688
2689 intel_enable_dp(encoder);
89b667f8
JB
2690}
2691
ecff4f3b 2692static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2693{
2694 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2695 struct drm_device *dev = encoder->base.dev;
2696 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2697 struct intel_crtc *intel_crtc =
2698 to_intel_crtc(encoder->base.crtc);
e4607fcf 2699 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2700 int pipe = intel_crtc->pipe;
89b667f8 2701
8ac33ed3
DV
2702 intel_dp_prepare(encoder);
2703
89b667f8 2704 /* Program Tx lane resets to default */
a580516d 2705 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2706 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2707 DPIO_PCS_TX_LANE2_RESET |
2708 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2709 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2710 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2711 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2712 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2713 DPIO_PCS_CLK_SOFT_RESET);
2714
2715 /* Fix up inter-pair skew failure */
ab3c759a
CML
2716 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2717 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2718 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2719 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2720}
2721
e4a1d846
CML
2722static void chv_pre_enable_dp(struct intel_encoder *encoder)
2723{
2724 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2725 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2726 struct drm_device *dev = encoder->base.dev;
2727 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2728 struct intel_crtc *intel_crtc =
2729 to_intel_crtc(encoder->base.crtc);
2730 enum dpio_channel ch = vlv_dport_to_channel(dport);
2731 int pipe = intel_crtc->pipe;
2e523e98 2732 int data, i, stagger;
949c1d43 2733 u32 val;
e4a1d846 2734
a580516d 2735 mutex_lock(&dev_priv->sb_lock);
949c1d43 2736
570e2a74
VS
2737 /* allow hardware to manage TX FIFO reset source */
2738 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2739 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2740 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2741
2742 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2743 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2744 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2745
949c1d43 2746	/* Deassert soft data lane reset */
97fd4d5c 2747 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2748 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2749 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2750
2751 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2752 val |= CHV_PCS_REQ_SOFTRESET_EN;
2753 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2754
2755 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2756 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2757 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2758
97fd4d5c 2759 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2760 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2761 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2762
 2763	/* Program Tx lane latency optimal setting */
e4a1d846 2764 for (i = 0; i < 4; i++) {
e4a1d846
CML
2765 /* Set the upar bit */
2766 data = (i == 1) ? 0x0 : 0x1;
2767 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2768 data << DPIO_UPAR_SHIFT);
2769 }
2770
2771 /* Data lane stagger programming */
2e523e98
VS
2772 if (intel_crtc->config->port_clock > 270000)
2773 stagger = 0x18;
2774 else if (intel_crtc->config->port_clock > 135000)
2775 stagger = 0xd;
2776 else if (intel_crtc->config->port_clock > 67500)
2777 stagger = 0x7;
2778 else if (intel_crtc->config->port_clock > 33750)
2779 stagger = 0x4;
2780 else
2781 stagger = 0x2;
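	/*
	 * Example (derived from the thresholds above): a 1.62 GHz RBR link
	 * (port_clock == 162000) and a 2.7 GHz HBR link (270000) both fall
	 * into the 135000..270000 bucket and use stagger 0xd; only clocks
	 * above 270000 select 0x18.
	 */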
2782
2783 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2784 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2785 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2786
2787 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2788 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2789 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2790
2791 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2792 DPIO_LANESTAGGER_STRAP(stagger) |
2793 DPIO_LANESTAGGER_STRAP_OVRD |
2794 DPIO_TX1_STAGGER_MASK(0x1f) |
2795 DPIO_TX1_STAGGER_MULT(6) |
2796 DPIO_TX2_STAGGER_MULT(0));
2797
2798 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2799 DPIO_LANESTAGGER_STRAP(stagger) |
2800 DPIO_LANESTAGGER_STRAP_OVRD |
2801 DPIO_TX1_STAGGER_MASK(0x1f) |
2802 DPIO_TX1_STAGGER_MULT(7) |
2803 DPIO_TX2_STAGGER_MULT(5));
e4a1d846 2804
a580516d 2805 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2806
e4a1d846 2807 intel_enable_dp(encoder);
e4a1d846
CML
2808}
2809
9197c88b
VS
2810static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2811{
2812 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2813 struct drm_device *dev = encoder->base.dev;
2814 struct drm_i915_private *dev_priv = dev->dev_private;
2815 struct intel_crtc *intel_crtc =
2816 to_intel_crtc(encoder->base.crtc);
2817 enum dpio_channel ch = vlv_dport_to_channel(dport);
2818 enum pipe pipe = intel_crtc->pipe;
2819 u32 val;
2820
625695f8
VS
2821 intel_dp_prepare(encoder);
2822
a580516d 2823 mutex_lock(&dev_priv->sb_lock);
9197c88b 2824
b9e5ac3c
VS
2825 /* program left/right clock distribution */
2826 if (pipe != PIPE_B) {
2827 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2828 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2829 if (ch == DPIO_CH0)
2830 val |= CHV_BUFLEFTENA1_FORCE;
2831 if (ch == DPIO_CH1)
2832 val |= CHV_BUFRIGHTENA1_FORCE;
2833 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2834 } else {
2835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2836 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2837 if (ch == DPIO_CH0)
2838 val |= CHV_BUFLEFTENA2_FORCE;
2839 if (ch == DPIO_CH1)
2840 val |= CHV_BUFRIGHTENA2_FORCE;
2841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2842 }
2843
9197c88b
VS
2844 /* program clock channel usage */
2845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2846 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2847 if (pipe != PIPE_B)
2848 val &= ~CHV_PCS_USEDCLKCHANNEL;
2849 else
2850 val |= CHV_PCS_USEDCLKCHANNEL;
2851 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2852
2853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2854 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2855 if (pipe != PIPE_B)
2856 val &= ~CHV_PCS_USEDCLKCHANNEL;
2857 else
2858 val |= CHV_PCS_USEDCLKCHANNEL;
2859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2860
2861 /*
 2862	 * This is a bit weird since generally CL
2863 * matches the pipe, but here we need to
2864 * pick the CL based on the port.
2865 */
2866 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2867 if (pipe != PIPE_B)
2868 val &= ~CHV_CMN_USEDCLKCHANNEL;
2869 else
2870 val |= CHV_CMN_USEDCLKCHANNEL;
2871 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2872
a580516d 2873 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2874}
2875
a4fc5ed6 2876/*
df0c237d
JB
2877 * Native read with retry for link status and receiver capability reads for
2878 * cases where the sink may still be asleep.
9d1a1031
JN
2879 *
2880 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2881 * supposed to retry 3 times per the spec.
a4fc5ed6 2882 */
9d1a1031
JN
2883static ssize_t
2884intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2885 void *buffer, size_t size)
a4fc5ed6 2886{
9d1a1031
JN
2887 ssize_t ret;
2888 int i;
61da5fab 2889
f6a19066
VS
2890 /*
 2891	 * Sometimes we just get the same incorrect byte repeated
 2892	 * over the entire buffer. Doing just one throw-away read
2893 * initially seems to "solve" it.
2894 */
2895 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2896
61da5fab 2897 for (i = 0; i < 3; i++) {
9d1a1031
JN
2898 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2899 if (ret == size)
2900 return ret;
61da5fab
JB
2901 msleep(1);
2902 }
a4fc5ed6 2903
9d1a1031 2904 return ret;
a4fc5ed6
KP
2905}
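/*
 * Illustrative usage (a sketch, not new API): intel_dp_get_link_status()
 * below is one caller; a receiver capability read follows the same shape:
 *
 *	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
 *				    intel_dp->dpcd,
 *				    sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd))
 *		... DPCD contents are valid ...
 */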
2906
2907/*
2908 * Fetch AUX CH registers 0x202 - 0x207 which contain
2909 * link status information
2910 */
2911static bool
93f62dad 2912intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2913{
9d1a1031
JN
2914 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2915 DP_LANE0_1_STATUS,
2916 link_status,
2917 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2918}
2919
1100244e 2920/* These are source-specific values. */
a4fc5ed6 2921static uint8_t
1a2eb460 2922intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2923{
30add22d 2924 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2925 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2926 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2927
9314726b
VK
2928 if (IS_BROXTON(dev))
2929 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2930 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2931 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2932 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2933 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2934 } else if (IS_VALLEYVIEW(dev))
bd60018a 2935 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2936 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2938 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2939 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2940 else
bd60018a 2941 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2942}
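/*
 * Descriptive summary of the maximums selected above (derived from the
 * code, not new policy):
 *
 *	level 3: BXT, VLV, CPT PCH ports (port != A), and gen9+ low-vswing
 *		 eDP on port A
 *	level 2: other gen9+, gen7 port A, and the default case
 */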
2943
2944static uint8_t
2945intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2946{
30add22d 2947 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2948 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2949
5a9d1f1a
DL
2950 if (INTEL_INFO(dev)->gen >= 9) {
2951 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2953 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2955 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2956 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2957 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2958 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2959 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2960 default:
2961 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2962 }
2963 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2964 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2965 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2966 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2967 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2968 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2969 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2970 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2971 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2972 default:
bd60018a 2973 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2974 }
e2fa6fba
P
2975 } else if (IS_VALLEYVIEW(dev)) {
2976 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2978 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2980 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2982 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2984 default:
bd60018a 2985 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2986 }
bc7d38a4 2987 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2988 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2990 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2993 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2994 default:
bd60018a 2995 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2996 }
2997 } else {
2998 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3000 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3001 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3002 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3004 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3005 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3006 default:
bd60018a 3007 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3008 }
a4fc5ed6
KP
3009 }
3010}
3011
5829975c 3012static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3013{
3014 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3017 struct intel_crtc *intel_crtc =
3018 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3019 unsigned long demph_reg_value, preemph_reg_value,
3020 uniqtranscale_reg_value;
3021 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3022 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3023 int pipe = intel_crtc->pipe;
e2fa6fba
P
3024
3025 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3026 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3027 preemph_reg_value = 0x0004000;
3028 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3030 demph_reg_value = 0x2B405555;
3031 uniqtranscale_reg_value = 0x552AB83A;
3032 break;
bd60018a 3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3034 demph_reg_value = 0x2B404040;
3035 uniqtranscale_reg_value = 0x5548B83A;
3036 break;
bd60018a 3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3038 demph_reg_value = 0x2B245555;
3039 uniqtranscale_reg_value = 0x5560B83A;
3040 break;
bd60018a 3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3042 demph_reg_value = 0x2B405555;
3043 uniqtranscale_reg_value = 0x5598DA3A;
3044 break;
3045 default:
3046 return 0;
3047 }
3048 break;
bd60018a 3049 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3050 preemph_reg_value = 0x0002000;
3051 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3053 demph_reg_value = 0x2B404040;
3054 uniqtranscale_reg_value = 0x5552B83A;
3055 break;
bd60018a 3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3057 demph_reg_value = 0x2B404848;
3058 uniqtranscale_reg_value = 0x5580B83A;
3059 break;
bd60018a 3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3061 demph_reg_value = 0x2B404040;
3062 uniqtranscale_reg_value = 0x55ADDA3A;
3063 break;
3064 default:
3065 return 0;
3066 }
3067 break;
bd60018a 3068 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3069 preemph_reg_value = 0x0000000;
3070 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3071 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3072 demph_reg_value = 0x2B305555;
3073 uniqtranscale_reg_value = 0x5570B83A;
3074 break;
bd60018a 3075 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3076 demph_reg_value = 0x2B2B4040;
3077 uniqtranscale_reg_value = 0x55ADDA3A;
3078 break;
3079 default:
3080 return 0;
3081 }
3082 break;
bd60018a 3083 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3084 preemph_reg_value = 0x0006000;
3085 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3086 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3087 demph_reg_value = 0x1B405555;
3088 uniqtranscale_reg_value = 0x55ADDA3A;
3089 break;
3090 default:
3091 return 0;
3092 }
3093 break;
3094 default:
3095 return 0;
3096 }
3097
a580516d 3098 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3099 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3100 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3101 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3102 uniqtranscale_reg_value);
ab3c759a
CML
3103 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3104 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3105 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3106 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3107 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3108
3109 return 0;
3110}
3111
5829975c 3112static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3113{
3114 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3115 struct drm_i915_private *dev_priv = dev->dev_private;
3116 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3117 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3118 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3119 uint8_t train_set = intel_dp->train_set[0];
3120 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3121 enum pipe pipe = intel_crtc->pipe;
3122 int i;
e4a1d846
CML
3123
3124 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3125 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3126 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3128 deemph_reg_value = 128;
3129 margin_reg_value = 52;
3130 break;
bd60018a 3131 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3132 deemph_reg_value = 128;
3133 margin_reg_value = 77;
3134 break;
bd60018a 3135 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3136 deemph_reg_value = 128;
3137 margin_reg_value = 102;
3138 break;
bd60018a 3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3140 deemph_reg_value = 128;
3141 margin_reg_value = 154;
3142 /* FIXME extra to set for 1200 */
3143 break;
3144 default:
3145 return 0;
3146 }
3147 break;
bd60018a 3148 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3149 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3150 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3151 deemph_reg_value = 85;
3152 margin_reg_value = 78;
3153 break;
bd60018a 3154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3155 deemph_reg_value = 85;
3156 margin_reg_value = 116;
3157 break;
bd60018a 3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3159 deemph_reg_value = 85;
3160 margin_reg_value = 154;
3161 break;
3162 default:
3163 return 0;
3164 }
3165 break;
bd60018a 3166 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3167 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3169 deemph_reg_value = 64;
3170 margin_reg_value = 104;
3171 break;
bd60018a 3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3173 deemph_reg_value = 64;
3174 margin_reg_value = 154;
3175 break;
3176 default:
3177 return 0;
3178 }
3179 break;
bd60018a 3180 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3181 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3182 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3183 deemph_reg_value = 43;
3184 margin_reg_value = 154;
3185 break;
3186 default:
3187 return 0;
3188 }
3189 break;
3190 default:
3191 return 0;
3192 }
3193
a580516d 3194 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3195
3196 /* Clear calc init */
1966e59e
VS
3197 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3198 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3199 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3200 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3201 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3202
3203 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3204 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3205 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3206 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3207 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3208
a02ef3c7
VS
3209 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3210 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3211 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3212 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3213
3214 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3215 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3216 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3218
e4a1d846 3219 /* Program swing deemph */
f72df8db
VS
3220 for (i = 0; i < 4; i++) {
3221 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3222 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3223 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3224 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3225 }
e4a1d846
CML
3226
3227 /* Program swing margin */
f72df8db
VS
3228 for (i = 0; i < 4; i++) {
3229 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3230 val &= ~DPIO_SWING_MARGIN000_MASK;
3231 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3232 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3233 }
e4a1d846
CML
3234
3235 /* Disable unique transition scale */
f72df8db
VS
3236 for (i = 0; i < 4; i++) {
3237 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3238 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3239 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3240 }
e4a1d846
CML
3241
3242 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3243 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3244 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3245 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3246
3247 /*
 3248 * The document says bit 27 should be set for ch0 and bit 26
 3249 * for ch1, which might be a typo in the doc.
 3250 * For now, for this unique transition scale selection, set bit
 3251 * 27 for both ch0 and ch1.
3252 */
f72df8db
VS
3253 for (i = 0; i < 4; i++) {
3254 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3255 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3256 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3257 }
e4a1d846 3258
f72df8db
VS
3259 for (i = 0; i < 4; i++) {
3260 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3261 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3262 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3263 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3264 }
e4a1d846
CML
3265 }
3266
3267 /* Start swing calculation */
1966e59e
VS
3268 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3269 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3270 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3271
3272 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3273 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3274 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3275
3276 /* LRC Bypass */
3277 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3278 val |= DPIO_LRC_BYPASS;
3279 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3280
a580516d 3281 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3282
3283 return 0;
3284}
3285
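/*
 * Worked example (illustrative, not part of the original file): with
 * train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1
 * (0x09, assuming the usual DPCD layout with pre-emphasis in bits 4:3),
 * the switch above selects deemph_reg_value = 85 and margin_reg_value = 116,
 * which are then programmed into CHV_TX_DW4/CHV_TX_DW2 for all four lanes.
 */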
a4fc5ed6 3286static void
0301b3ac
JN
3287intel_get_adjust_train(struct intel_dp *intel_dp,
3288 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3289{
3290 uint8_t v = 0;
3291 uint8_t p = 0;
3292 int lane;
1a2eb460
KP
3293 uint8_t voltage_max;
3294 uint8_t preemph_max;
a4fc5ed6 3295
33a34e4e 3296 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3297 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3298 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3299
3300 if (this_v > v)
3301 v = this_v;
3302 if (this_p > p)
3303 p = this_p;
3304 }
3305
1a2eb460 3306 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3307 if (v >= voltage_max)
3308 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3309
1a2eb460
KP
3310 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3311 if (p >= preemph_max)
3312 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3313
3314 for (lane = 0; lane < 4; lane++)
33a34e4e 3315 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3316}
3317
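/*
 * Example (illustrative): if the sink requests voltage swing level 2 on
 * lane 0 and level 1 on lane 1, with pre-emphasis level 1 on both, the
 * loop above keeps the per-lane maxima v = LEVEL_2 and p = LEVEL_1. If
 * intel_dp_voltage_max() also returns LEVEL_2, DP_TRAIN_MAX_SWING_REACHED
 * is ORed in, so every train_set[] entry ends up as
 * (LEVEL_2 | MAX_SWING_REACHED | PRE_EMPH_LEVEL_1).
 */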
3318static uint32_t
5829975c 3319gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3320{
3cf2efb1 3321 uint32_t signal_levels = 0;
a4fc5ed6 3322
3cf2efb1 3323 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3325 default:
3326 signal_levels |= DP_VOLTAGE_0_4;
3327 break;
bd60018a 3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3329 signal_levels |= DP_VOLTAGE_0_6;
3330 break;
bd60018a 3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3332 signal_levels |= DP_VOLTAGE_0_8;
3333 break;
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3335 signal_levels |= DP_VOLTAGE_1_2;
3336 break;
3337 }
3cf2efb1 3338 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3339 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3340 default:
3341 signal_levels |= DP_PRE_EMPHASIS_0;
3342 break;
bd60018a 3343 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3344 signal_levels |= DP_PRE_EMPHASIS_3_5;
3345 break;
bd60018a 3346 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3347 signal_levels |= DP_PRE_EMPHASIS_6;
3348 break;
bd60018a 3349 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3350 signal_levels |= DP_PRE_EMPHASIS_9_5;
3351 break;
3352 }
3353 return signal_levels;
3354}
3355
e3421a18
ZW
3356/* Gen6's DP voltage swing and pre-emphasis control */
3357static uint32_t
5829975c 3358gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3359{
3c5a62b5
YL
3360 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3361 DP_TRAIN_PRE_EMPHASIS_MASK);
3362 switch (signal_levels) {
bd60018a
SJ
3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3364 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3365 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3367 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3370 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3373 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3376 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3377 default:
3c5a62b5
YL
3378 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3379 "0x%x\n", signal_levels);
3380 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3381 }
3382}
3383
1a2eb460
KP
3384/* Gen7's DP voltage swing and pre-emphasis control */
3385static uint32_t
5829975c 3386gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3387{
3388 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3389 DP_TRAIN_PRE_EMPHASIS_MASK);
3390 switch (signal_levels) {
bd60018a 3391 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3392 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3394 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3395 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3396 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3397
bd60018a 3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3399 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3401 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3402
bd60018a 3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3404 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3406 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3407
3408 default:
3409 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3410 "0x%x\n", signal_levels);
3411 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3412 }
3413}
3414
d6c0d722
PZ
3415/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3416static uint32_t
5829975c 3417hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3418{
d6c0d722
PZ
3419 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3420 DP_TRAIN_PRE_EMPHASIS_MASK);
3421 switch (signal_levels) {
bd60018a 3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3423 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3425 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3427 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3429 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3430
bd60018a 3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3432 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3434 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3435 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3436 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3437
bd60018a 3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3439 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3441 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3442
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3444 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3445 default:
3446 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3447 "0x%x\n", signal_levels);
c5fe6a06 3448 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3449 }
a4fc5ed6
KP
3450}
3451
5829975c 3452static void bxt_signal_levels(struct intel_dp *intel_dp)
96fb9f9b
VK
3453{
3454 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3455 enum port port = dport->port;
3456 struct drm_device *dev = dport->base.base.dev;
3457 struct intel_encoder *encoder = &dport->base;
3458 uint8_t train_set = intel_dp->train_set[0];
3459 uint32_t level = 0;
3460
3461 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3462 DP_TRAIN_PRE_EMPHASIS_MASK);
3463 switch (signal_levels) {
3464 default:
3465 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3467 level = 0;
3468 break;
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3470 level = 1;
3471 break;
3472 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3473 level = 2;
3474 break;
3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3476 level = 3;
3477 break;
3478 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3479 level = 4;
3480 break;
3481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3482 level = 5;
3483 break;
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3485 level = 6;
3486 break;
3487 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3488 level = 7;
3489 break;
3490 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3491 level = 8;
3492 break;
3493 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3494 level = 9;
3495 break;
3496 }
3497
3498 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3499}
3500
f0a3424e
PZ
3501/* Properly updates "DP" with the correct signal levels. */
3502static void
3503intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3504{
3505 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3506 enum port port = intel_dig_port->port;
f0a3424e
PZ
3507 struct drm_device *dev = intel_dig_port->base.base.dev;
3508 uint32_t signal_levels, mask;
3509 uint8_t train_set = intel_dp->train_set[0];
3510
96fb9f9b
VK
3511 if (IS_BROXTON(dev)) {
3512 signal_levels = 0;
5829975c 3513 bxt_signal_levels(intel_dp);
96fb9f9b
VK
3514 mask = 0;
3515 } else if (HAS_DDI(dev)) {
5829975c 3516 signal_levels = hsw_signal_levels(train_set);
f0a3424e 3517 mask = DDI_BUF_EMP_MASK;
e4a1d846 3518 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3519 signal_levels = chv_signal_levels(intel_dp);
e4a1d846 3520 mask = 0;
e2fa6fba 3521 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3522 signal_levels = vlv_signal_levels(intel_dp);
e2fa6fba 3523 mask = 0;
bc7d38a4 3524 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3525 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3526 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3527 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3528 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3529 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3530 } else {
5829975c 3531 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3532 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3533 }
3534
96fb9f9b
VK
3535 if (mask)
3536 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3537
3538 DRM_DEBUG_KMS("Using vswing level %d\n",
3539 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3540 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3541 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3542 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3543
3544 *DP = (*DP & ~mask) | signal_levels;
3545}
3546
a4fc5ed6 3547static bool
ea5b213a 3548intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3549 uint32_t *DP,
58e10eb9 3550 uint8_t dp_train_pat)
a4fc5ed6 3551{
174edf1f
PZ
3552 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3553 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3554 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3555 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3556 int ret, len;
a4fc5ed6 3557
7b13b58a 3558 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3559
70aff66c 3560 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3561 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3562
2cdfe6c8
JN
3563 buf[0] = dp_train_pat;
3564 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3565 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3566 /* don't write DP_TRAINING_LANEx_SET on disable */
3567 len = 1;
3568 } else {
3569 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3570 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3571 len = intel_dp->lane_count + 1;
47ea7542 3572 }
a4fc5ed6 3573
9d1a1031
JN
3574 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3575 buf, len);
2cdfe6c8
JN
3576
3577 return ret == len;
a4fc5ed6
KP
3578}
3579
70aff66c
JN
3580static bool
3581intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3582 uint8_t dp_train_pat)
3583{
4e96c977
MK
3584 if (!intel_dp->train_set_valid)
3585 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3586 intel_dp_set_signal_levels(intel_dp, DP);
3587 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3588}
3589
3590static bool
3591intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3592 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3593{
3594 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3595 struct drm_device *dev = intel_dig_port->base.base.dev;
3596 struct drm_i915_private *dev_priv = dev->dev_private;
3597 int ret;
3598
3599 intel_get_adjust_train(intel_dp, link_status);
3600 intel_dp_set_signal_levels(intel_dp, DP);
3601
3602 I915_WRITE(intel_dp->output_reg, *DP);
3603 POSTING_READ(intel_dp->output_reg);
3604
9d1a1031
JN
3605 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3606 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3607
3608 return ret == intel_dp->lane_count;
3609}
3610
3ab9c637
ID
3611static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3612{
3613 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3614 struct drm_device *dev = intel_dig_port->base.base.dev;
3615 struct drm_i915_private *dev_priv = dev->dev_private;
3616 enum port port = intel_dig_port->port;
3617 uint32_t val;
3618
3619 if (!HAS_DDI(dev))
3620 return;
3621
3622 val = I915_READ(DP_TP_CTL(port));
3623 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3624 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3625 I915_WRITE(DP_TP_CTL(port), val);
3626
3627 /*
 3628 * On PORT_A we can have only eDP in SST mode. There, the only reason
3629 * we need to set idle transmission mode is to work around a HW issue
3630 * where we enable the pipe while not in idle link-training mode.
 3631 * In this case there is a requirement to wait for a minimum number of
3632 * idle patterns to be sent.
3633 */
3634 if (port == PORT_A)
3635 return;
3636
3637 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3638 1))
3639 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3640}
3641
33a34e4e 3642/* Enable corresponding port and start training pattern 1 */
c19b0669 3643void
33a34e4e 3644intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3645{
da63a9f2 3646 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3647 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3648 int i;
3649 uint8_t voltage;
cdb0e95b 3650 int voltage_tries, loop_tries;
ea5b213a 3651 uint32_t DP = intel_dp->DP;
6aba5b6c 3652 uint8_t link_config[2];
a4fc5ed6 3653
affa9354 3654 if (HAS_DDI(dev))
c19b0669
PZ
3655 intel_ddi_prepare_link_retrain(encoder);
3656
3cf2efb1 3657 /* Write the link configuration data */
6aba5b6c
JN
3658 link_config[0] = intel_dp->link_bw;
3659 link_config[1] = intel_dp->lane_count;
3660 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3661 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3662 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3663 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3664 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3665 &intel_dp->rate_select, 1);
6aba5b6c
JN
3666
3667 link_config[0] = 0;
3668 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3669 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3670
3671 DP |= DP_PORT_EN;
1a2eb460 3672
70aff66c
JN
3673 /* clock recovery */
3674 if (!intel_dp_reset_link_train(intel_dp, &DP,
3675 DP_TRAINING_PATTERN_1 |
3676 DP_LINK_SCRAMBLING_DISABLE)) {
3677 DRM_ERROR("failed to enable link training\n");
3678 return;
3679 }
3680
a4fc5ed6 3681 voltage = 0xff;
cdb0e95b
KP
3682 voltage_tries = 0;
3683 loop_tries = 0;
a4fc5ed6 3684 for (;;) {
70aff66c 3685 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3686
a7c9655f 3687 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3688 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3689 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3690 break;
93f62dad 3691 }
a4fc5ed6 3692
01916270 3693 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3694 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3695 break;
3696 }
3697
4e96c977
MK
3698 /*
3699 * if we used previously trained voltage and pre-emphasis values
3700 * and we don't get clock recovery, reset link training values
3701 */
3702 if (intel_dp->train_set_valid) {
3703 DRM_DEBUG_KMS("clock recovery not ok, reset");
3704 /* clear the flag as we are not reusing train set */
3705 intel_dp->train_set_valid = false;
3706 if (!intel_dp_reset_link_train(intel_dp, &DP,
3707 DP_TRAINING_PATTERN_1 |
3708 DP_LINK_SCRAMBLING_DISABLE)) {
3709 DRM_ERROR("failed to enable link training\n");
3710 return;
3711 }
3712 continue;
3713 }
3714
3cf2efb1
CW
3715 /* Check to see if we've tried the max voltage */
3716 for (i = 0; i < intel_dp->lane_count; i++)
3717 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3718 break;
3b4f819d 3719 if (i == intel_dp->lane_count) {
b06fbda3
DV
3720 ++loop_tries;
3721 if (loop_tries == 5) {
3def84b3 3722 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3723 break;
3724 }
70aff66c
JN
3725 intel_dp_reset_link_train(intel_dp, &DP,
3726 DP_TRAINING_PATTERN_1 |
3727 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3728 voltage_tries = 0;
3729 continue;
3730 }
a4fc5ed6 3731
3cf2efb1 3732 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3733 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3734 ++voltage_tries;
b06fbda3 3735 if (voltage_tries == 5) {
3def84b3 3736 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3737 break;
3738 }
3739 } else
3740 voltage_tries = 0;
3741 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3742
70aff66c
JN
3743 /* Update training set as requested by target */
3744 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3745 DRM_ERROR("failed to update link training\n");
3746 break;
3747 }
a4fc5ed6
KP
3748 }
3749
33a34e4e
JB
3750 intel_dp->DP = DP;
3751}
3752
c19b0669 3753void
33a34e4e
JB
3754intel_dp_complete_link_train(struct intel_dp *intel_dp)
3755{
33a34e4e 3756 bool channel_eq = false;
37f80975 3757 int tries, cr_tries;
33a34e4e 3758 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3759 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3760
 3761 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3762 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3763 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3764
a4fc5ed6 3765 /* channel equalization */
70aff66c 3766 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3767 training_pattern |
70aff66c
JN
3768 DP_LINK_SCRAMBLING_DISABLE)) {
3769 DRM_ERROR("failed to start channel equalization\n");
3770 return;
3771 }
3772
a4fc5ed6 3773 tries = 0;
37f80975 3774 cr_tries = 0;
a4fc5ed6
KP
3775 channel_eq = false;
3776 for (;;) {
70aff66c 3777 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3778
37f80975
JB
3779 if (cr_tries > 5) {
3780 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3781 break;
3782 }
3783
a7c9655f 3784 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3785 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3786 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3787 break;
70aff66c 3788 }
a4fc5ed6 3789
37f80975 3790 /* Make sure clock is still ok */
01916270 3791 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3792 intel_dp->train_set_valid = false;
37f80975 3793 intel_dp_start_link_train(intel_dp);
70aff66c 3794 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3795 training_pattern |
70aff66c 3796 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3797 cr_tries++;
3798 continue;
3799 }
3800
1ffdff13 3801 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3802 channel_eq = true;
3803 break;
3804 }
a4fc5ed6 3805
37f80975
JB
3806 /* Try 5 times, then try clock recovery if that fails */
3807 if (tries > 5) {
4e96c977 3808 intel_dp->train_set_valid = false;
37f80975 3809 intel_dp_start_link_train(intel_dp);
70aff66c 3810 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3811 training_pattern |
70aff66c 3812 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3813 tries = 0;
3814 cr_tries++;
3815 continue;
3816 }
a4fc5ed6 3817
70aff66c
JN
3818 /* Update training set as requested by target */
3819 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3820 DRM_ERROR("failed to update link training\n");
3821 break;
3822 }
3cf2efb1 3823 ++tries;
869184a6 3824 }
3cf2efb1 3825
3ab9c637
ID
3826 intel_dp_set_idle_link_train(intel_dp);
3827
3828 intel_dp->DP = DP;
3829
4e96c977 3830 if (channel_eq) {
5fa836a9 3831 intel_dp->train_set_valid = true;
07f42258 3832 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3833 }
3ab9c637
ID
3834}
3835
3836void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3837{
70aff66c 3838 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3839 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3840}
3841
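/*
 * Illustrative sketch (not part of the original driver): callers are
 * expected to run the three link-training phases above in order, as the
 * retrain paths further down in this file do. Error handling is omitted
 * and the helper name is made up for the example.
 */
static inline void example_dp_retrain(struct intel_dp *intel_dp)
{
	intel_dp_start_link_train(intel_dp);    /* clock recovery, TPS1 */
	intel_dp_complete_link_train(intel_dp); /* channel EQ, TPS2/TPS3 */
	intel_dp_stop_link_train(intel_dp);     /* training pattern off */
}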
3842static void
ea5b213a 3843intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3844{
da63a9f2 3845 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3846 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3847 enum port port = intel_dig_port->port;
da63a9f2 3848 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3849 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3850 uint32_t DP = intel_dp->DP;
a4fc5ed6 3851
bc76e320 3852 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3853 return;
3854
0c33d8d7 3855 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3856 return;
3857
28c97730 3858 DRM_DEBUG_KMS("\n");
32f9d658 3859
39e5fa88
VS
3860 if ((IS_GEN7(dev) && port == PORT_A) ||
3861 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3862 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3863 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3864 } else {
aad3d14d
VS
3865 if (IS_CHERRYVIEW(dev))
3866 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3867 else
3868 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3869 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3870 }
1612c8bd 3871 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3872 POSTING_READ(intel_dp->output_reg);
5eb08b69 3873
1612c8bd
VS
3874 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3875 I915_WRITE(intel_dp->output_reg, DP);
3876 POSTING_READ(intel_dp->output_reg);
3877
3878 /*
3879 * HW workaround for IBX, we need to move the port
3880 * to transcoder A after disabling it to allow the
3881 * matching HDMI port to be enabled on transcoder A.
3882 */
3883 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3884 /* always enable with pattern 1 (as per spec) */
3885 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3886 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3887 I915_WRITE(intel_dp->output_reg, DP);
3888 POSTING_READ(intel_dp->output_reg);
3889
3890 DP &= ~DP_PORT_EN;
5bddd17f 3891 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3892 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3893 }
3894
f01eca2e 3895 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3896}
3897
26d61aad
KP
3898static bool
3899intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3900{
a031d709
RV
3901 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3902 struct drm_device *dev = dig_port->base.base.dev;
3903 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3904 uint8_t rev;
a031d709 3905
9d1a1031
JN
3906 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3907 sizeof(intel_dp->dpcd)) < 0)
edb39244 3908 return false; /* aux transfer failed */
92fd8fd1 3909
a8e98153 3910 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3911
edb39244
AJ
3912 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3913 return false; /* DPCD not present */
3914
2293bb5c
SK
3915 /* Check if the panel supports PSR */
3916 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3917 if (is_edp(intel_dp)) {
9d1a1031
JN
3918 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3919 intel_dp->psr_dpcd,
3920 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3921 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3922 dev_priv->psr.sink_support = true;
50003939 3923 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3924 }
474d1ec4
SJ
3925
3926 if (INTEL_INFO(dev)->gen >= 9 &&
3927 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3928 uint8_t frame_sync_cap;
3929
3930 dev_priv->psr.sink_support = true;
3931 intel_dp_dpcd_read_wake(&intel_dp->aux,
3932 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3933 &frame_sync_cap, 1);
3934 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3935 /* PSR2 needs frame sync as well */
3936 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3937 DRM_DEBUG_KMS("PSR2 %s on sink",
3938 dev_priv->psr.psr2_support ? "supported" : "not supported");
3939 }
50003939
JN
3940 }
3941
7809a611 3942 /* Training Pattern 3 support, both source and sink */
06ea66b6 3943 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3944 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3945 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3946 intel_dp->use_tps3 = true;
f8d8a672 3947 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3948 } else
3949 intel_dp->use_tps3 = false;
3950
fc0f8e25
SJ
3951 /* Intermediate frequency support */
3952 if (is_edp(intel_dp) &&
3953 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3954 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 3955 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3956 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3957 int i;
3958
fc0f8e25
SJ
3959 intel_dp_dpcd_read_wake(&intel_dp->aux,
3960 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3961 sink_rates,
3962 sizeof(sink_rates));
ea2d8a42 3963
94ca719e
VS
3964 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3965 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3966
3967 if (val == 0)
3968 break;
3969
af77b974
SJ
 3970 /* Value read is in units of 200 kHz while drm clock is saved in deca-kHz */
3971 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3972 }
94ca719e 3973 intel_dp->num_sink_rates = i;
fc0f8e25 3974 }
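	/*
	 * Worked example (illustrative, assuming the 200 kHz granularity
	 * that eDP 1.4 defines for DP_SUPPORTED_LINK_RATES): a raw entry of
	 * 2700 (0x0a8c) gives 2700 * 200 = 540000 kHz, stored above as
	 * 540000 / 10 = 54000 deca-kHz, i.e. the HBR2 (5.4 Gbit/s per lane)
	 * link rate.
	 */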
0336400e
VS
3975
3976 intel_dp_print_rates(intel_dp);
3977
edb39244
AJ
3978 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3979 DP_DWN_STRM_PORT_PRESENT))
3980 return true; /* native DP sink */
3981
3982 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3983 return true; /* no per-port downstream info */
3984
9d1a1031
JN
3985 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3986 intel_dp->downstream_ports,
3987 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3988 return false; /* downstream port status fetch failed */
3989
3990 return true;
92fd8fd1
KP
3991}
3992
0d198328
AJ
3993static void
3994intel_dp_probe_oui(struct intel_dp *intel_dp)
3995{
3996 u8 buf[3];
3997
3998 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3999 return;
4000
9d1a1031 4001 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4002 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4003 buf[0], buf[1], buf[2]);
4004
9d1a1031 4005 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4006 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4007 buf[0], buf[1], buf[2]);
4008}
4009
0e32b39c
DA
4010static bool
4011intel_dp_probe_mst(struct intel_dp *intel_dp)
4012{
4013 u8 buf[1];
4014
4015 if (!intel_dp->can_mst)
4016 return false;
4017
4018 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4019 return false;
4020
0e32b39c
DA
4021 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4022 if (buf[0] & DP_MST_CAP) {
4023 DRM_DEBUG_KMS("Sink is MST capable\n");
4024 intel_dp->is_mst = true;
4025 } else {
4026 DRM_DEBUG_KMS("Sink is not MST capable\n");
4027 intel_dp->is_mst = false;
4028 }
4029 }
0e32b39c
DA
4030
4031 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4032 return intel_dp->is_mst;
4033}
4034
d2e216d0
RV
4035int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4036{
4037 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4038 struct drm_device *dev = intel_dig_port->base.base.dev;
4039 struct intel_crtc *intel_crtc =
4040 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
4041 u8 buf;
4042 int test_crc_count;
4043 int attempts = 6;
4373f0f2 4044 int ret = 0;
d2e216d0 4045
4373f0f2 4046 hsw_disable_ips(intel_crtc);
d2e216d0 4047
4373f0f2
PZ
4048 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4049 ret = -EIO;
4050 goto out;
4051 }
4052
4053 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4054 ret = -ENOTTY;
4055 goto out;
4056 }
d2e216d0 4057
4373f0f2
PZ
4058 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4059 ret = -EIO;
4060 goto out;
4061 }
1dda5f93 4062
9d1a1031 4063 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4373f0f2
PZ
4064 buf | DP_TEST_SINK_START) < 0) {
4065 ret = -EIO;
4066 goto out;
4067 }
4068
4069 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4070 ret = -EIO;
4071 goto out;
4072 }
d2e216d0 4073
ad9dc91b 4074 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4075
ad9dc91b 4076 do {
1dda5f93 4077 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4078 DP_TEST_SINK_MISC, &buf) < 0) {
4079 ret = -EIO;
4080 goto out;
4081 }
ad9dc91b
RV
4082 intel_wait_for_vblank(dev, intel_crtc->pipe);
4083 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4084
4085 if (attempts == 0) {
90bd1f46 4086 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4373f0f2
PZ
4087 ret = -ETIMEDOUT;
4088 goto out;
ad9dc91b 4089 }
d2e216d0 4090
4373f0f2
PZ
4091 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4092 ret = -EIO;
4093 goto out;
4094 }
d2e216d0 4095
4373f0f2
PZ
4096 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4097 ret = -EIO;
4098 goto out;
4099 }
1dda5f93 4100 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4373f0f2
PZ
4101 buf & ~DP_TEST_SINK_START) < 0) {
4102 ret = -EIO;
4103 goto out;
4104 }
4105out:
4106 hsw_enable_ips(intel_crtc);
4107 return ret;
d2e216d0
RV
4108}
4109
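/*
 * Summary of the sink CRC handshake above (illustrative): DP_TEST_SINK_MISC
 * advertises CRC support, DP_TEST_SINK_START kicks off calculation, the loop
 * waits up to six vblanks for the DP_TEST_COUNT field to advance, and the six
 * CRC bytes are then read starting at DP_TEST_CRC_R_CR before the test sink
 * bit is cleared again.
 */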
a60f0e38
JB
4110static bool
4111intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4112{
9d1a1031
JN
4113 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4114 DP_DEVICE_SERVICE_IRQ_VECTOR,
4115 sink_irq_vector, 1) == 1;
a60f0e38
JB
4116}
4117
0e32b39c
DA
4118static bool
4119intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4120{
4121 int ret;
4122
4123 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4124 DP_SINK_COUNT_ESI,
4125 sink_irq_vector, 14);
4126 if (ret != 14)
4127 return false;
4128
4129 return true;
4130}
4131
c5d5ab7a
TP
4132static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4133{
4134 uint8_t test_result = DP_TEST_ACK;
4135 return test_result;
4136}
4137
4138static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4139{
4140 uint8_t test_result = DP_TEST_NAK;
4141 return test_result;
4142}
4143
4144static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4145{
c5d5ab7a 4146 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4147 struct intel_connector *intel_connector = intel_dp->attached_connector;
4148 struct drm_connector *connector = &intel_connector->base;
4149
4150 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4151 connector->edid_corrupt ||
559be30c
TP
4152 intel_dp->aux.i2c_defer_count > 6) {
4153 /* Check EDID read for NACKs, DEFERs and corruption
4154 * (DP CTS 1.2 Core r1.1)
4155 * 4.2.2.4 : Failed EDID read, I2C_NAK
4156 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4157 * 4.2.2.6 : EDID corruption detected
4158 * Use failsafe mode for all cases
4159 */
4160 if (intel_dp->aux.i2c_nack_count > 0 ||
4161 intel_dp->aux.i2c_defer_count > 0)
4162 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4163 intel_dp->aux.i2c_nack_count,
4164 intel_dp->aux.i2c_defer_count);
4165 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4166 } else {
4167 if (!drm_dp_dpcd_write(&intel_dp->aux,
4168 DP_TEST_EDID_CHECKSUM,
4169 &intel_connector->detect_edid->checksum,
5a1cc655 4170 1))
559be30c
TP
4171 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4172
4173 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4174 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4175 }
4176
4177 /* Set test active flag here so userspace doesn't interrupt things */
4178 intel_dp->compliance_test_active = 1;
4179
c5d5ab7a
TP
4180 return test_result;
4181}
4182
4183static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4184{
c5d5ab7a
TP
4185 uint8_t test_result = DP_TEST_NAK;
4186 return test_result;
4187}
4188
4189static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4190{
4191 uint8_t response = DP_TEST_NAK;
4192 uint8_t rxdata = 0;
4193 int status = 0;
4194
559be30c 4195 intel_dp->compliance_test_active = 0;
c5d5ab7a 4196 intel_dp->compliance_test_type = 0;
559be30c
TP
4197 intel_dp->compliance_test_data = 0;
4198
c5d5ab7a
TP
4199 intel_dp->aux.i2c_nack_count = 0;
4200 intel_dp->aux.i2c_defer_count = 0;
4201
4202 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4203 if (status <= 0) {
4204 DRM_DEBUG_KMS("Could not read test request from sink\n");
4205 goto update_status;
4206 }
4207
4208 switch (rxdata) {
4209 case DP_TEST_LINK_TRAINING:
4210 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4211 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4212 response = intel_dp_autotest_link_training(intel_dp);
4213 break;
4214 case DP_TEST_LINK_VIDEO_PATTERN:
4215 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4216 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4217 response = intel_dp_autotest_video_pattern(intel_dp);
4218 break;
4219 case DP_TEST_LINK_EDID_READ:
4220 DRM_DEBUG_KMS("EDID test requested\n");
4221 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4222 response = intel_dp_autotest_edid(intel_dp);
4223 break;
4224 case DP_TEST_LINK_PHY_TEST_PATTERN:
4225 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4226 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4227 response = intel_dp_autotest_phy_pattern(intel_dp);
4228 break;
4229 default:
4230 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4231 break;
4232 }
4233
4234update_status:
4235 status = drm_dp_dpcd_write(&intel_dp->aux,
4236 DP_TEST_RESPONSE,
4237 &response, 1);
4238 if (status <= 0)
4239 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4240}
4241
0e32b39c
DA
4242static int
4243intel_dp_check_mst_status(struct intel_dp *intel_dp)
4244{
4245 bool bret;
4246
4247 if (intel_dp->is_mst) {
4248 u8 esi[16] = { 0 };
4249 int ret = 0;
4250 int retry;
4251 bool handled;
4252 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4253go_again:
4254 if (bret == true) {
4255
4256 /* check link status - esi[10] = 0x200c */
4257 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4258 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4259 intel_dp_start_link_train(intel_dp);
4260 intel_dp_complete_link_train(intel_dp);
4261 intel_dp_stop_link_train(intel_dp);
4262 }
4263
6f34cc39 4264 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4265 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4266
4267 if (handled) {
4268 for (retry = 0; retry < 3; retry++) {
4269 int wret;
4270 wret = drm_dp_dpcd_write(&intel_dp->aux,
4271 DP_SINK_COUNT_ESI+1,
4272 &esi[1], 3);
4273 if (wret == 3) {
4274 break;
4275 }
4276 }
4277
4278 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4279 if (bret == true) {
6f34cc39 4280 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4281 goto go_again;
4282 }
4283 } else
4284 ret = 0;
4285
4286 return ret;
4287 } else {
4288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4289 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4290 intel_dp->is_mst = false;
4291 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4292 /* send a hotplug event */
4293 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4294 }
4295 }
4296 return -EINVAL;
4297}
4298
a4fc5ed6
KP
4299/*
4300 * According to DP spec
4301 * 5.1.2:
4302 * 1. Read DPCD
4303 * 2. Configure link according to Receiver Capabilities
4304 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4305 * 4. Check link status on receipt of hot-plug interrupt
4306 */
a5146200 4307static void
ea5b213a 4308intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4309{
5b215bcf 4310 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4311 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4312 u8 sink_irq_vector;
93f62dad 4313 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4314
5b215bcf
DA
4315 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4316
da63a9f2 4317 if (!intel_encoder->connectors_active)
d2b996ac 4318 return;
59cd09e1 4319
da63a9f2 4320 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4321 return;
4322
1a125d8a
ID
4323 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4324 return;
4325
92fd8fd1 4326 /* Try to read receiver status if the link appears to be up */
93f62dad 4327 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4328 return;
4329 }
4330
92fd8fd1 4331 /* Now read the DPCD to see if it's actually running */
26d61aad 4332 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4333 return;
4334 }
4335
a60f0e38
JB
4336 /* Try to read the source of the interrupt */
4337 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4338 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4339 /* Clear interrupt source */
9d1a1031
JN
4340 drm_dp_dpcd_writeb(&intel_dp->aux,
4341 DP_DEVICE_SERVICE_IRQ_VECTOR,
4342 sink_irq_vector);
a60f0e38
JB
4343
4344 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4345 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4346 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4347 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4348 }
4349
1ffdff13 4350 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4351 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4352 intel_encoder->base.name);
33a34e4e
JB
4353 intel_dp_start_link_train(intel_dp);
4354 intel_dp_complete_link_train(intel_dp);
3ab9c637 4355 intel_dp_stop_link_train(intel_dp);
33a34e4e 4356 }
a4fc5ed6 4357}
a4fc5ed6 4358
caf9ab24 4359/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4360static enum drm_connector_status
26d61aad 4361intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4362{
caf9ab24 4363 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4364 uint8_t type;
4365
4366 if (!intel_dp_get_dpcd(intel_dp))
4367 return connector_status_disconnected;
4368
4369 /* if there's no downstream port, we're done */
4370 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4371 return connector_status_connected;
caf9ab24
AJ
4372
4373 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4374 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4375 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4376 uint8_t reg;
9d1a1031
JN
4377
4378 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4379 &reg, 1) < 0)
caf9ab24 4380 return connector_status_unknown;
9d1a1031 4381
23235177
AJ
4382 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4383 : connector_status_disconnected;
caf9ab24
AJ
4384 }
4385
4386 /* If no HPD, poke DDC gently */
0b99836f 4387 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4388 return connector_status_connected;
caf9ab24
AJ
4389
4390 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4391 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4392 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4393 if (type == DP_DS_PORT_TYPE_VGA ||
4394 type == DP_DS_PORT_TYPE_NON_EDID)
4395 return connector_status_unknown;
4396 } else {
4397 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4398 DP_DWN_STRM_PORT_TYPE_MASK;
4399 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4400 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4401 return connector_status_unknown;
4402 }
caf9ab24
AJ
4403
4404 /* Anything else is out of spec, warn and ignore */
4405 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4406 return connector_status_disconnected;
71ba9000
AJ
4407}
4408
d410b56d
CW
4409static enum drm_connector_status
4410edp_detect(struct intel_dp *intel_dp)
4411{
4412 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4413 enum drm_connector_status status;
4414
4415 status = intel_panel_detect(dev);
4416 if (status == connector_status_unknown)
4417 status = connector_status_connected;
4418
4419 return status;
4420}
4421
5eb08b69 4422static enum drm_connector_status
a9756bb5 4423ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4424{
30add22d 4425 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4426 struct drm_i915_private *dev_priv = dev->dev_private;
4427 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4428
1b469639
DL
4429 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4430 return connector_status_disconnected;
4431
26d61aad 4432 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4433}
4434
2a592bec
DA
4435static int g4x_digital_port_connected(struct drm_device *dev,
4436 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4437{
a4fc5ed6 4438 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4439 uint32_t bit;
5eb08b69 4440
232a6ee9
TP
4441 if (IS_VALLEYVIEW(dev)) {
4442 switch (intel_dig_port->port) {
4443 case PORT_B:
4444 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4445 break;
4446 case PORT_C:
4447 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4448 break;
4449 case PORT_D:
4450 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4451 break;
4452 default:
2a592bec 4453 return -EINVAL;
232a6ee9
TP
4454 }
4455 } else {
4456 switch (intel_dig_port->port) {
4457 case PORT_B:
4458 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4459 break;
4460 case PORT_C:
4461 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4462 break;
4463 case PORT_D:
4464 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4465 break;
4466 default:
2a592bec 4467 return -EINVAL;
232a6ee9 4468 }
a4fc5ed6
KP
4469 }
4470
10f76a38 4471 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4472 return 0;
4473 return 1;
4474}
4475
4476static enum drm_connector_status
4477g4x_dp_detect(struct intel_dp *intel_dp)
4478{
4479 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4481 int ret;
4482
4483 /* Can't disconnect eDP, but you can close the lid... */
4484 if (is_edp(intel_dp)) {
4485 enum drm_connector_status status;
4486
4487 status = intel_panel_detect(dev);
4488 if (status == connector_status_unknown)
4489 status = connector_status_connected;
4490 return status;
4491 }
4492
4493 ret = g4x_digital_port_connected(dev, intel_dig_port);
4494 if (ret == -EINVAL)
4495 return connector_status_unknown;
4496 else if (ret == 0)
a4fc5ed6
KP
4497 return connector_status_disconnected;
4498
26d61aad 4499 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4500}
4501
8c241fef 4502static struct edid *
beb60608 4503intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4504{
beb60608 4505 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4506
9cd300e0
JN
4507 /* use cached edid if we have one */
4508 if (intel_connector->edid) {
9cd300e0
JN
4509 /* invalid edid */
4510 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4511 return NULL;
4512
55e9edeb 4513 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4514 } else
4515 return drm_get_edid(&intel_connector->base,
4516 &intel_dp->aux.ddc);
4517}
8c241fef 4518
beb60608
CW
4519static void
4520intel_dp_set_edid(struct intel_dp *intel_dp)
4521{
4522 struct intel_connector *intel_connector = intel_dp->attached_connector;
4523 struct edid *edid;
8c241fef 4524
beb60608
CW
4525 edid = intel_dp_get_edid(intel_dp);
4526 intel_connector->detect_edid = edid;
4527
4528 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4529 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4530 else
4531 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4532}
4533
beb60608
CW
4534static void
4535intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4536{
beb60608 4537 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4538
beb60608
CW
4539 kfree(intel_connector->detect_edid);
4540 intel_connector->detect_edid = NULL;
9cd300e0 4541
beb60608
CW
4542 intel_dp->has_audio = false;
4543}
d6f24d0f 4544
beb60608
CW
4545static enum intel_display_power_domain
4546intel_dp_power_get(struct intel_dp *dp)
4547{
4548 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4549 enum intel_display_power_domain power_domain;
4550
4551 power_domain = intel_display_port_power_domain(encoder);
4552 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4553
4554 return power_domain;
4555}
d6f24d0f 4556
beb60608
CW
4557static void
4558intel_dp_power_put(struct intel_dp *dp,
4559 enum intel_display_power_domain power_domain)
4560{
4561 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4562 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4563}
4564
a9756bb5
ZW
4565static enum drm_connector_status
4566intel_dp_detect(struct drm_connector *connector, bool force)
4567{
4568 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4569 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4570 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4571 struct drm_device *dev = connector->dev;
a9756bb5 4572 enum drm_connector_status status;
671dedd2 4573 enum intel_display_power_domain power_domain;
0e32b39c 4574 bool ret;
09b1eb13 4575 u8 sink_irq_vector;
a9756bb5 4576
164c8598 4577 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4578 connector->base.id, connector->name);
beb60608 4579 intel_dp_unset_edid(intel_dp);
164c8598 4580
0e32b39c
DA
4581 if (intel_dp->is_mst) {
4582 /* MST devices are disconnected from a monitor POV */
4583 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4584 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4585 return connector_status_disconnected;
0e32b39c
DA
4586 }
4587
beb60608 4588 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4589
d410b56d
CW
4590 /* Can't disconnect eDP, but you can close the lid... */
4591 if (is_edp(intel_dp))
4592 status = edp_detect(intel_dp);
4593 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4594 status = ironlake_dp_detect(intel_dp);
4595 else
4596 status = g4x_dp_detect(intel_dp);
4597 if (status != connector_status_connected)
c8c8fb33 4598 goto out;
a9756bb5 4599
0d198328
AJ
4600 intel_dp_probe_oui(intel_dp);
4601
0e32b39c
DA
4602 ret = intel_dp_probe_mst(intel_dp);
4603 if (ret) {
 4604 /* if we are in MST mode then this connector
 4605 * won't appear connected or have anything with EDID on it */
4606 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4607 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4608 status = connector_status_disconnected;
4609 goto out;
4610 }
4611
beb60608 4612 intel_dp_set_edid(intel_dp);
a9756bb5 4613
d63885da
PZ
4614 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4615 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4616 status = connector_status_connected;
4617
09b1eb13
TP
4618 /* Try to read the source of the interrupt */
4619 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4620 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4621 /* Clear interrupt source */
4622 drm_dp_dpcd_writeb(&intel_dp->aux,
4623 DP_DEVICE_SERVICE_IRQ_VECTOR,
4624 sink_irq_vector);
4625
4626 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4627 intel_dp_handle_test_request(intel_dp);
4628 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4629 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4630 }
4631
c8c8fb33 4632out:
beb60608 4633 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4634 return status;
a4fc5ed6
KP
4635}
4636
beb60608
CW
4637static void
4638intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4639{
df0e9248 4640 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4641 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4642 enum intel_display_power_domain power_domain;
a4fc5ed6 4643
beb60608
CW
4644 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4645 connector->base.id, connector->name);
4646 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4647
beb60608
CW
4648 if (connector->status != connector_status_connected)
4649 return;
671dedd2 4650
beb60608
CW
4651 power_domain = intel_dp_power_get(intel_dp);
4652
4653 intel_dp_set_edid(intel_dp);
4654
4655 intel_dp_power_put(intel_dp, power_domain);
4656
4657 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4658 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4659}
4660
4661static int intel_dp_get_modes(struct drm_connector *connector)
4662{
4663 struct intel_connector *intel_connector = to_intel_connector(connector);
4664 struct edid *edid;
4665
4666 edid = intel_connector->detect_edid;
4667 if (edid) {
4668 int ret = intel_connector_update_modes(connector, edid);
4669 if (ret)
4670 return ret;
4671 }
32f9d658 4672
f8779fda 4673 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4674 if (is_edp(intel_attached_dp(connector)) &&
4675 intel_connector->panel.fixed_mode) {
f8779fda 4676 struct drm_display_mode *mode;
beb60608
CW
4677
4678 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4679 intel_connector->panel.fixed_mode);
f8779fda 4680 if (mode) {
32f9d658
ZW
4681 drm_mode_probed_add(connector, mode);
4682 return 1;
4683 }
4684 }
beb60608 4685
32f9d658 4686 return 0;
a4fc5ed6
KP
4687}
4688
1aad7ac0
CW
4689static bool
4690intel_dp_detect_audio(struct drm_connector *connector)
4691{
1aad7ac0 4692 bool has_audio = false;
beb60608 4693 struct edid *edid;
1aad7ac0 4694
beb60608
CW
4695 edid = to_intel_connector(connector)->detect_edid;
4696 if (edid)
1aad7ac0 4697 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4698
1aad7ac0
CW
4699 return has_audio;
4700}
4701
f684960e
CW
4702static int
4703intel_dp_set_property(struct drm_connector *connector,
4704 struct drm_property *property,
4705 uint64_t val)
4706{
e953fd7b 4707 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4708 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4709 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4710 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4711 int ret;
4712
662595df 4713 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4714 if (ret)
4715 return ret;
4716
3f43c48d 4717 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4718 int i = val;
4719 bool has_audio;
4720
4721 if (i == intel_dp->force_audio)
f684960e
CW
4722 return 0;
4723
1aad7ac0 4724 intel_dp->force_audio = i;
f684960e 4725
c3e5f67b 4726 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4727 has_audio = intel_dp_detect_audio(connector);
4728 else
c3e5f67b 4729 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4730
4731 if (has_audio == intel_dp->has_audio)
f684960e
CW
4732 return 0;
4733
1aad7ac0 4734 intel_dp->has_audio = has_audio;
f684960e
CW
4735 goto done;
4736 }
4737
e953fd7b 4738 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4739 bool old_auto = intel_dp->color_range_auto;
4740 uint32_t old_range = intel_dp->color_range;
4741
55bc60db
VS
4742 switch (val) {
4743 case INTEL_BROADCAST_RGB_AUTO:
4744 intel_dp->color_range_auto = true;
4745 break;
4746 case INTEL_BROADCAST_RGB_FULL:
4747 intel_dp->color_range_auto = false;
4748 intel_dp->color_range = 0;
4749 break;
4750 case INTEL_BROADCAST_RGB_LIMITED:
4751 intel_dp->color_range_auto = false;
4752 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4753 break;
4754 default:
4755 return -EINVAL;
4756 }
ae4edb80
DV
4757
4758 if (old_auto == intel_dp->color_range_auto &&
4759 old_range == intel_dp->color_range)
4760 return 0;
4761
e953fd7b
CW
4762 goto done;
4763 }
4764
53b41837
YN
4765 if (is_edp(intel_dp) &&
4766 property == connector->dev->mode_config.scaling_mode_property) {
4767 if (val == DRM_MODE_SCALE_NONE) {
4768 DRM_DEBUG_KMS("no scaling not supported\n");
4769 return -EINVAL;
4770 }
4771
4772 if (intel_connector->panel.fitting_mode == val) {
4773 /* the eDP scaling property is not changed */
4774 return 0;
4775 }
4776 intel_connector->panel.fitting_mode = val;
4777
4778 goto done;
4779 }
4780
f684960e
CW
4781 return -EINVAL;
4782
4783done:
c0c36b94
CW
4784 if (intel_encoder->base.crtc)
4785 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4786
4787 return 0;
4788}
4789
a4fc5ed6 4790static void
73845adf 4791intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4792{
1d508706 4793 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4794
10e972d3 4795 kfree(intel_connector->detect_edid);
beb60608 4796
9cd300e0
JN
4797 if (!IS_ERR_OR_NULL(intel_connector->edid))
4798 kfree(intel_connector->edid);
4799
acd8db10
PZ
4800 /* Can't call is_edp() since the encoder may have been destroyed
4801 * already. */
4802 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4803 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4804
a4fc5ed6 4805 drm_connector_cleanup(connector);
55f78c43 4806 kfree(connector);
a4fc5ed6
KP
4807}
4808
00c09d70 4809void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4810{
da63a9f2
PZ
4811 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4812 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4813
4f71d0cb 4814 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4815 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4816 if (is_edp(intel_dp)) {
4817 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4818 /*
4819 * vdd might still be enabled due to the delayed vdd off.
4820 * Make sure vdd is actually turned off here.
4821 */
773538e8 4822 pps_lock(intel_dp);
4be73780 4823 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4824 pps_unlock(intel_dp);
4825
01527b31
CT
4826 if (intel_dp->edp_notifier.notifier_call) {
4827 unregister_reboot_notifier(&intel_dp->edp_notifier);
4828 intel_dp->edp_notifier.notifier_call = NULL;
4829 }
bd943159 4830 }
c8bd0e49 4831 drm_encoder_cleanup(encoder);
da63a9f2 4832 kfree(intel_dig_port);
24d05927
DV
4833}
4834
07f9cd0b
ID
4835static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4836{
4837 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4838
4839 if (!is_edp(intel_dp))
4840 return;
4841
951468f3
VS
4842 /*
4843 * vdd might still be enabled due to the delayed vdd off.
4844 * Make sure vdd is actually turned off here.
4845 */
afa4e53a 4846 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4847 pps_lock(intel_dp);
07f9cd0b 4848 edp_panel_vdd_off_sync(intel_dp);
773538e8 4849 pps_unlock(intel_dp);
07f9cd0b
ID
4850}
4851
49e6bc51
VS
4852static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4853{
4854 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4855 struct drm_device *dev = intel_dig_port->base.base.dev;
4856 struct drm_i915_private *dev_priv = dev->dev_private;
4857 enum intel_display_power_domain power_domain;
4858
4859 lockdep_assert_held(&dev_priv->pps_mutex);
4860
4861 if (!edp_have_panel_vdd(intel_dp))
4862 return;
4863
4864 /*
4865 * The VDD bit needs a power domain reference, so if the bit is
4866 * already enabled when we boot or resume, grab this reference and
4867 * schedule a vdd off, so we don't hold on to the reference
4868 * indefinitely.
4869 */
4870 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4871 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4872 intel_display_power_get(dev_priv, power_domain);
4873
4874 edp_panel_vdd_schedule_off(intel_dp);
4875}
4876
6d93c0c4
ID
4877static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4878{
49e6bc51
VS
4879 struct intel_dp *intel_dp;
4880
4881 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4882 return;
4883
4884 intel_dp = enc_to_intel_dp(encoder);
4885
4886 pps_lock(intel_dp);
4887
4888 /*
4889 * Read out the current power sequencer assignment,
4890 * in case the BIOS did something with it.
4891 */
4892 if (IS_VALLEYVIEW(encoder->dev))
4893 vlv_initial_power_sequencer_setup(intel_dp);
4894
4895 intel_edp_panel_vdd_sanitize(intel_dp);
4896
4897 pps_unlock(intel_dp);
6d93c0c4
ID
4898}
4899
a4fc5ed6 4900static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4901 .dpms = intel_connector_dpms,
a4fc5ed6 4902 .detect = intel_dp_detect,
beb60608 4903 .force = intel_dp_force,
a4fc5ed6 4904 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4905 .set_property = intel_dp_set_property,
2545e4a6 4906 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4907 .destroy = intel_dp_connector_destroy,
c6f95f27 4908 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4909 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4910};
4911
4912static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4913 .get_modes = intel_dp_get_modes,
4914 .mode_valid = intel_dp_mode_valid,
df0e9248 4915 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4916};
4917
a4fc5ed6 4918static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4919 .reset = intel_dp_encoder_reset,
24d05927 4920 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4921};
4922
b2c5c181 4923enum irqreturn
13cf5504
DA
4924intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4925{
4926 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4927 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4928 struct drm_device *dev = intel_dig_port->base.base.dev;
4929 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4930 enum intel_display_power_domain power_domain;
b2c5c181 4931 enum irqreturn ret = IRQ_NONE;
1c767b33 4932
0e32b39c
DA
4933 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4934 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4935
7a7f84cc
VS
4936 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4937 /*
4938 * vdd off can generate a long pulse on eDP which
4939 * would require vdd on to handle it, and thus we
4940 * would end up in an endless cycle of
4941 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4942 */
4943 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4944 port_name(intel_dig_port->port));
a8b3d52f 4945 return IRQ_HANDLED;
7a7f84cc
VS
4946 }
4947
26fbb774
VS
4948 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4949 port_name(intel_dig_port->port),
0e32b39c 4950 long_hpd ? "long" : "short");
13cf5504 4951
1c767b33
ID
4952 power_domain = intel_display_port_power_domain(intel_encoder);
4953 intel_display_power_get(dev_priv, power_domain);
4954
0e32b39c 4955 if (long_hpd) {
5fa836a9
MK
4956 /* indicate that we need to restart link training */
4957 intel_dp->train_set_valid = false;
2a592bec
DA
4958
4959 if (HAS_PCH_SPLIT(dev)) {
4960 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4961 goto mst_fail;
4962 } else {
4963 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4964 goto mst_fail;
4965 }
0e32b39c
DA
4966
4967 if (!intel_dp_get_dpcd(intel_dp)) {
4968 goto mst_fail;
4969 }
4970
4971 intel_dp_probe_oui(intel_dp);
4972
4973 if (!intel_dp_probe_mst(intel_dp))
4974 goto mst_fail;
4975
4976 } else {
4977 if (intel_dp->is_mst) {
1c767b33 4978 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4979 goto mst_fail;
4980 }
4981
4982 if (!intel_dp->is_mst) {
4983 /*
4984 * we'll check the link status via the normal hot plug path later -
4985 * but for short hpds we should check it now
4986 */
5b215bcf 4987 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4988 intel_dp_check_link_status(intel_dp);
5b215bcf 4989 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4990 }
4991 }
b2c5c181
DV
4992
4993 ret = IRQ_HANDLED;
4994
1c767b33 4995 goto put_power;
0e32b39c
DA
4996mst_fail:
4997 /* if we were in MST mode and the device is not there, get out of MST mode */
4998 if (intel_dp->is_mst) {
4999 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5000 intel_dp->is_mst = false;
5001 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5002 }
1c767b33
ID
5003put_power:
5004 intel_display_power_put(dev_priv, power_domain);
5005
5006 return ret;
13cf5504
DA
5007}
5008
e3421a18
ZW
5009/* Return which DP Port should be selected for Transcoder DP control */
5010int
0206e353 5011intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5012{
5013 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5014 struct intel_encoder *intel_encoder;
5015 struct intel_dp *intel_dp;
e3421a18 5016
fa90ecef
PZ
5017 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5018 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5019
fa90ecef
PZ
5020 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5021 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5022 return intel_dp->output_reg;
e3421a18 5023 }
ea5b213a 5024
e3421a18
ZW
5025 return -1;
5026}
5027
36e83a18 5028/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5029bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5030{
5031 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5032 union child_device_config *p_child;
36e83a18 5033 int i;
5d8a7752
VS
5034 static const short port_mapping[] = {
5035 [PORT_B] = PORT_IDPB,
5036 [PORT_C] = PORT_IDPC,
5037 [PORT_D] = PORT_IDPD,
5038 };
36e83a18 5039
3b32a35b
VS
5040 if (port == PORT_A)
5041 return true;
5042
41aa3448 5043 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5044 return false;
5045
41aa3448
RV
5046 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5047 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5048
5d8a7752 5049 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5050 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5051 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5052 return true;
5053 }
5054 return false;
5055}
5056
0e32b39c 5057void
f684960e
CW
5058intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5059{
53b41837
YN
5060 struct intel_connector *intel_connector = to_intel_connector(connector);
5061
3f43c48d 5062 intel_attach_force_audio_property(connector);
e953fd7b 5063 intel_attach_broadcast_rgb_property(connector);
55bc60db 5064 intel_dp->color_range_auto = true;
53b41837
YN
5065
5066 if (is_edp(intel_dp)) {
5067 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5068 drm_object_attach_property(
5069 &connector->base,
53b41837 5070 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5071 DRM_MODE_SCALE_ASPECT);
5072 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5073 }
f684960e
CW
5074}
5075
dada1a9f
ID
5076static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5077{
5078 intel_dp->last_power_cycle = jiffies;
5079 intel_dp->last_power_on = jiffies;
5080 intel_dp->last_backlight_off = jiffies;
5081}
5082
67a54566
DV
5083static void
5084intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5085 struct intel_dp *intel_dp)
67a54566
DV
5086{
5087 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5088 struct edp_power_seq cur, vbt, spec,
5089 *final = &intel_dp->pps_delays;
67a54566 5090 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 5091 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5092
e39b999a
VS
5093 lockdep_assert_held(&dev_priv->pps_mutex);
5094
81ddbc69
VS
5095 /* already initialized? */
5096 if (final->t11_t12 != 0)
5097 return;
5098
453c5420 5099 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5100 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5101 pp_on_reg = PCH_PP_ON_DELAYS;
5102 pp_off_reg = PCH_PP_OFF_DELAYS;
5103 pp_div_reg = PCH_PP_DIVISOR;
5104 } else {
bf13e81b
JN
5105 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5106
5107 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5108 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5109 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5110 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5111 }
67a54566
DV
5112
5113 /* Workaround: Need to write PP_CONTROL with the unlock key as
5114 * the very first thing. */
453c5420 5115 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 5116 I915_WRITE(pp_ctrl_reg, pp);
67a54566 5117
453c5420
JB
5118 pp_on = I915_READ(pp_on_reg);
5119 pp_off = I915_READ(pp_off_reg);
5120 pp_div = I915_READ(pp_div_reg);
67a54566
DV
5121
5122 /* Pull timing values out of registers */
5123 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5124 PANEL_POWER_UP_DELAY_SHIFT;
5125
5126 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5127 PANEL_LIGHT_ON_DELAY_SHIFT;
5128
5129 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5130 PANEL_LIGHT_OFF_DELAY_SHIFT;
5131
5132 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5133 PANEL_POWER_DOWN_DELAY_SHIFT;
5134
5135 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5136 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5137
5138 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5139 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5140
41aa3448 5141 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5142
5143 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5144 * our hw here, which are all in 100usec. */
5145 spec.t1_t3 = 210 * 10;
5146 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5147 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5148 spec.t10 = 500 * 10;
5149 /* This one is special and actually in units of 100ms, but zero
5150 * based in the hw (so we need to add 100 ms). But the sw vbt
5151 * table multiplies it with 1000 to make it in units of 100usec,
5152 * too. */
5153 spec.t11_t12 = (510 + 100) * 10;
5154
5155 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5156 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5157
5158 /* Use the max of the register settings and vbt. If both are
5159 * unset, fall back to the spec limits. */
36b5f425 5160#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5161 spec.field : \
5162 max(cur.field, vbt.field))
5163 assign_final(t1_t3);
5164 assign_final(t8);
5165 assign_final(t9);
5166 assign_final(t10);
5167 assign_final(t11_t12);
5168#undef assign_final
5169
36b5f425 5170#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5171 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5172 intel_dp->backlight_on_delay = get_delay(t8);
5173 intel_dp->backlight_off_delay = get_delay(t9);
5174 intel_dp->panel_power_down_delay = get_delay(t10);
5175 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5176#undef get_delay
5177
f30d26e4
JN
5178 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5179 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5180 intel_dp->panel_power_cycle_delay);
5181
5182 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5183 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5184}
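
The delay selection above merges three sources (the current register values, the VBT eDP PPS table, and the eDP 1.3 spec limits), all expressed in the hardware's 100 usec units, and then rounds up to whole milliseconds. A minimal standalone sketch of that selection and unit conversion, using made-up register/VBT numbers purely for illustration:

#include <stdio.h>

/* Values are hypothetical and, as in the hardware fields, in 100 us units. */
struct pps { int t1_t3, t8, t9, t10, t11_t12; };

static int pick(int cur, int vbt, int spec)
{
	/* max of register and VBT values; fall back to the spec limit if both are 0 */
	int m = cur > vbt ? cur : vbt;
	return m ? m : spec;
}

int main(void)
{
	struct pps cur  = { .t1_t3 = 0,    .t10 = 500  };	/* read back from PP_ON/PP_OFF */
	struct pps vbt  = { .t1_t3 = 2100, .t10 = 0    };	/* from the VBT eDP PPS table */
	struct pps spec = { .t1_t3 = 2100, .t10 = 5000 };	/* eDP 1.3 upper limits */

	int t1_t3 = pick(cur.t1_t3, vbt.t1_t3, spec.t1_t3);
	int t10   = pick(cur.t10,   vbt.t10,   spec.t10);

	/* DIV_ROUND_UP(x, 10): 100 us units -> whole milliseconds */
	printf("panel power up delay   %d ms\n", (t1_t3 + 9) / 10);	/* 210 ms */
	printf("panel power down delay %d ms\n", (t10 + 9) / 10);	/* 50 ms */
	return 0;
}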
5185
5186static void
5187intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5188 struct intel_dp *intel_dp)
f30d26e4
JN
5189{
5190 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5191 u32 pp_on, pp_off, pp_div, port_sel = 0;
5192 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5193 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 5194 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5195 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5196
e39b999a 5197 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
5198
5199 if (HAS_PCH_SPLIT(dev)) {
5200 pp_on_reg = PCH_PP_ON_DELAYS;
5201 pp_off_reg = PCH_PP_OFF_DELAYS;
5202 pp_div_reg = PCH_PP_DIVISOR;
5203 } else {
bf13e81b
JN
5204 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5205
5206 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5207 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5208 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5209 }
5210
b2f19d1a
PZ
5211 /*
5212 * And finally store the new values in the power sequencer. The
5213 * backlight delays are set to 1 because we do manual waits on them. For
5214 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5215 * we'll end up waiting for the backlight off delay twice: once when we
5216 * do the manual sleep, and once when we disable the panel and wait for
5217 * the PP_STATUS bit to become zero.
5218 */
f30d26e4 5219 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5220 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5221 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5222 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5223 /* Compute the divisor for the pp clock, simply match the Bspec
5224 * formula. */
453c5420 5225 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 5226 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
5227 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5228
5229 /* Haswell doesn't have any port selection bits for the panel
5230 * power sequencer any more. */
bc7d38a4 5231 if (IS_VALLEYVIEW(dev)) {
ad933b56 5232 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5233 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5234 if (port == PORT_A)
a24c144c 5235 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5236 else
a24c144c 5237 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5238 }
5239
453c5420
JB
5240 pp_on |= port_sel;
5241
5242 I915_WRITE(pp_on_reg, pp_on);
5243 I915_WRITE(pp_off_reg, pp_off);
5244 I915_WRITE(pp_div_reg, pp_div);
67a54566 5245
67a54566 5246 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5247 I915_READ(pp_on_reg),
5248 I915_READ(pp_off_reg),
5249 I915_READ(pp_div_reg));
f684960e
CW
5250}
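
To make the register packing above concrete, here is a small worked example of the two pp_div fields (the raw clock value is a purely illustrative placeholder; only the arithmetic mirrors the code above):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int div     = 125;	/* hypothetical raw clock value */
	int t11_t12 = 6100;	/* (510 + 100) ms expressed in 100 us units */

	/* PP_REFERENCE_DIVIDER field, per the Bspec formula used above */
	int ref_div = (100 * div) / 2 - 1;

	/* PANEL_POWER_CYCLE_DELAY field is in units of 100 ms, rounded up */
	int cycle = DIV_ROUND_UP(t11_t12, 1000);

	printf("reference divider field: %d\n", ref_div);			/* 6249 */
	printf("power cycle field: %d (= %d ms)\n", cycle, cycle * 100);	/* 7 -> 700 ms */
	return 0;
}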
5251
b33a2815
VK
5252/**
5253 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5254 * @dev: DRM device
5255 * @refresh_rate: RR to be programmed
5256 *
5257 * This function gets called when refresh rate (RR) has to be changed from
5258 * one frequency to another. Switches can be between high and low RR
5259 * supported by the panel or to any other RR based on media playback (in
5260 * this case, RR value needs to be passed from user space).
5261 *
5262 * The caller of this function needs to take a lock on dev_priv->drrs.
5263 */
96178eeb 5264static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5265{
5266 struct drm_i915_private *dev_priv = dev->dev_private;
5267 struct intel_encoder *encoder;
96178eeb
VK
5268 struct intel_digital_port *dig_port = NULL;
5269 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5270 struct intel_crtc_state *config = NULL;
439d7ac0 5271 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5272 u32 reg, val;
96178eeb 5273 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5274
5275 if (refresh_rate <= 0) {
5276 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5277 return;
5278 }
5279
96178eeb
VK
5280 if (intel_dp == NULL) {
5281 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5282 return;
5283 }
5284
1fcc9d1c 5285 /*
e4d59f6b
RV
5286 * FIXME: This needs proper synchronization with psr state for some
5287 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5288 */
439d7ac0 5289
96178eeb
VK
5290 dig_port = dp_to_dig_port(intel_dp);
5291 encoder = &dig_port->base;
723f9aab 5292 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5293
5294 if (!intel_crtc) {
5295 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5296 return;
5297 }
5298
6e3c9717 5299 config = intel_crtc->config;
439d7ac0 5300
96178eeb 5301 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5302 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5303 return;
5304 }
5305
96178eeb
VK
5306 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5307 refresh_rate)
439d7ac0
PB
5308 index = DRRS_LOW_RR;
5309
96178eeb 5310 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5311 DRM_DEBUG_KMS(
5312 "DRRS requested for previously set RR...ignoring\n");
5313 return;
5314 }
5315
5316 if (!intel_crtc->active) {
5317 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5318 return;
5319 }
5320
44395bfe 5321 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5322 switch (index) {
5323 case DRRS_HIGH_RR:
5324 intel_dp_set_m_n(intel_crtc, M1_N1);
5325 break;
5326 case DRRS_LOW_RR:
5327 intel_dp_set_m_n(intel_crtc, M2_N2);
5328 break;
5329 case DRRS_MAX_RR:
5330 default:
5331 DRM_ERROR("Unsupported refreshrate type\n");
5332 }
5333 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5334 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5335 val = I915_READ(reg);
a4c30b1d 5336
439d7ac0 5337 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5338 if (IS_VALLEYVIEW(dev))
5339 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5340 else
5341 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5342 } else {
6fa7aec1
VK
5343 if (IS_VALLEYVIEW(dev))
5344 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5345 else
5346 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5347 }
5348 I915_WRITE(reg, val);
5349 }
5350
4e9ac947
VK
5351 dev_priv->drrs.refresh_rate_type = index;
5352
5353 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5354}
5355
b33a2815
VK
5356/**
5357 * intel_edp_drrs_enable - init drrs struct if supported
5358 * @intel_dp: DP struct
5359 *
5360 * Initializes frontbuffer_bits and drrs.dp
5361 */
c395578e
VK
5362void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5363{
5364 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5365 struct drm_i915_private *dev_priv = dev->dev_private;
5366 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5367 struct drm_crtc *crtc = dig_port->base.base.crtc;
5368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5369
5370 if (!intel_crtc->config->has_drrs) {
5371 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5372 return;
5373 }
5374
5375 mutex_lock(&dev_priv->drrs.mutex);
5376 if (WARN_ON(dev_priv->drrs.dp)) {
5377 DRM_ERROR("DRRS already enabled\n");
5378 goto unlock;
5379 }
5380
5381 dev_priv->drrs.busy_frontbuffer_bits = 0;
5382
5383 dev_priv->drrs.dp = intel_dp;
5384
5385unlock:
5386 mutex_unlock(&dev_priv->drrs.mutex);
5387}
5388
b33a2815
VK
5389/**
5390 * intel_edp_drrs_disable - Disable DRRS
5391 * @intel_dp: DP struct
5392 *
5393 */
c395578e
VK
5394void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5395{
5396 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5397 struct drm_i915_private *dev_priv = dev->dev_private;
5398 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5399 struct drm_crtc *crtc = dig_port->base.base.crtc;
5400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5401
5402 if (!intel_crtc->config->has_drrs)
5403 return;
5404
5405 mutex_lock(&dev_priv->drrs.mutex);
5406 if (!dev_priv->drrs.dp) {
5407 mutex_unlock(&dev_priv->drrs.mutex);
5408 return;
5409 }
5410
5411 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5412 intel_dp_set_drrs_state(dev_priv->dev,
5413 intel_dp->attached_connector->panel.
5414 fixed_mode->vrefresh);
5415
5416 dev_priv->drrs.dp = NULL;
5417 mutex_unlock(&dev_priv->drrs.mutex);
5418
5419 cancel_delayed_work_sync(&dev_priv->drrs.work);
5420}
5421
4e9ac947
VK
5422static void intel_edp_drrs_downclock_work(struct work_struct *work)
5423{
5424 struct drm_i915_private *dev_priv =
5425 container_of(work, typeof(*dev_priv), drrs.work.work);
5426 struct intel_dp *intel_dp;
5427
5428 mutex_lock(&dev_priv->drrs.mutex);
5429
5430 intel_dp = dev_priv->drrs.dp;
5431
5432 if (!intel_dp)
5433 goto unlock;
5434
439d7ac0 5435 /*
4e9ac947
VK
5436 * The delayed work can race with an invalidate hence we need to
5437 * recheck.
439d7ac0
PB
5438 */
5439
4e9ac947
VK
5440 if (dev_priv->drrs.busy_frontbuffer_bits)
5441 goto unlock;
439d7ac0 5442
4e9ac947
VK
5443 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5444 intel_dp_set_drrs_state(dev_priv->dev,
5445 intel_dp->attached_connector->panel.
5446 downclock_mode->vrefresh);
439d7ac0 5447
4e9ac947 5448unlock:
4e9ac947 5449 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5450}
5451
b33a2815
VK
5452/**
5453 * intel_edp_drrs_invalidate - Invalidate DRRS
5454 * @dev: DRM device
5455 * @frontbuffer_bits: frontbuffer plane tracking bits
5456 *
5457 * When there is a disturbance on screen (due to cursor movement/time
5458 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
5459 * high RR.
5460 *
5461 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5462 */
a93fad0f
VK
5463void intel_edp_drrs_invalidate(struct drm_device *dev,
5464 unsigned frontbuffer_bits)
5465{
5466 struct drm_i915_private *dev_priv = dev->dev_private;
5467 struct drm_crtc *crtc;
5468 enum pipe pipe;
5469
9da7d693 5470 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5471 return;
5472
88f933a8 5473 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5474
a93fad0f 5475 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5476 if (!dev_priv->drrs.dp) {
5477 mutex_unlock(&dev_priv->drrs.mutex);
5478 return;
5479 }
5480
a93fad0f
VK
5481 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5482 pipe = to_intel_crtc(crtc)->pipe;
5483
5484 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5485 intel_dp_set_drrs_state(dev_priv->dev,
5486 dev_priv->drrs.dp->attached_connector->panel.
5487 fixed_mode->vrefresh);
5488 }
5489
5490 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5491
5492 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5493 mutex_unlock(&dev_priv->drrs.mutex);
5494}
5495
b33a2815
VK
5496/**
5497 * intel_edp_drrs_flush - Flush DRRS
5498 * @dev: DRM device
5499 * @frontbuffer_bits: frontbuffer plane tracking bits
5500 *
5501 * When there is no movement on screen, DRRS work can be scheduled.
5502 * This DRRS work is responsible for setting relevant registers after a
5503 * timeout of 1 second.
5504 *
5505 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5506 */
a93fad0f
VK
5507void intel_edp_drrs_flush(struct drm_device *dev,
5508 unsigned frontbuffer_bits)
5509{
5510 struct drm_i915_private *dev_priv = dev->dev_private;
5511 struct drm_crtc *crtc;
5512 enum pipe pipe;
5513
9da7d693 5514 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5515 return;
5516
88f933a8 5517 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5518
a93fad0f 5519 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5520 if (!dev_priv->drrs.dp) {
5521 mutex_unlock(&dev_priv->drrs.mutex);
5522 return;
5523 }
5524
a93fad0f
VK
5525 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5526 pipe = to_intel_crtc(crtc)->pipe;
5527 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5528
a93fad0f
VK
5529 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5530 !dev_priv->drrs.busy_frontbuffer_bits)
5531 schedule_delayed_work(&dev_priv->drrs.work,
5532 msecs_to_jiffies(1000));
5533 mutex_unlock(&dev_priv->drrs.mutex);
5534}
5535
b33a2815
VK
5536/**
5537 * DOC: Display Refresh Rate Switching (DRRS)
5538 *
5539 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5540 * which enables switching between low and high refresh rates,
5541 * dynamically, based on the usage scenario. This feature is applicable
5542 * for internal panels.
5543 *
5544 * Indication that the panel supports DRRS is given by the panel EDID, which
5545 * would list multiple refresh rates for one resolution.
5546 *
5547 * DRRS is of 2 types - static and seamless.
5548 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5549 * (may appear as a blink on screen) and is used in dock-undock scenario.
5550 * Seamless DRRS involves changing RR without any visual effect to the user
5551 * and can be used during normal system usage. This is done by programming
5552 * certain registers.
5553 *
5554 * Support for static/seamless DRRS may be indicated in the VBT based on
5555 * inputs from the panel spec.
5556 *
5557 * DRRS saves power by switching to low RR based on usage scenarios.
5558 *
5559 * eDP DRRS:-
5560 * The implementation is based on frontbuffer tracking implementation.
5561 * When there is a disturbance on the screen triggered by user activity or a
5562 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5563 * When there is no movement on screen, after a timeout of 1 second, a switch
5564 * to low RR is made.
5565 * For integration with frontbuffer tracking code,
5566 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5567 *
5568 * DRRS can be further extended to support other internal panels and also
5569 * the scenario of video playback wherein RR is set based on the rate
5570 * requested by userspace.
5571 */
5572
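For orientation, the frontbuffer tracking code is the intended caller of the invalidate/flush entry points above. A simplified sketch of such a caller follows; the wrapper function is an illustrative placeholder, and only intel_edp_drrs_invalidate()/intel_edp_drrs_flush() and their signatures come from this file:

/* Illustrative only: a frontbuffer-tracking style caller of the DRRS hooks. */
static void example_frontbuffer_write(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* a frontbuffer write is about to happen: switch back to high RR */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering to the frontbuffer happens here ... */

	/*
	 * Writes have landed; the delayed work may drop back to the panel's
	 * downclock mode after the 1 second idle timeout.
	 */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}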
5573/**
5574 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5575 * @intel_connector: eDP connector
5576 * @fixed_mode: preferred mode of panel
5577 *
5578 * This function is called only once at driver load to initialize basic
5579 * DRRS stuff.
5580 *
5581 * Returns:
5582 * Downclock mode if panel supports it, else return NULL.
5583 * DRRS support is determined by the presence of downclock mode (apart
5584 * from VBT setting).
5585 */
4f9db5b5 5586static struct drm_display_mode *
96178eeb
VK
5587intel_dp_drrs_init(struct intel_connector *intel_connector,
5588 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5589{
5590 struct drm_connector *connector = &intel_connector->base;
96178eeb 5591 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5592 struct drm_i915_private *dev_priv = dev->dev_private;
5593 struct drm_display_mode *downclock_mode = NULL;
5594
9da7d693
DV
5595 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5596 mutex_init(&dev_priv->drrs.mutex);
5597
4f9db5b5
PB
5598 if (INTEL_INFO(dev)->gen <= 6) {
5599 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5600 return NULL;
5601 }
5602
5603 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5604 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5605 return NULL;
5606 }
5607
5608 downclock_mode = intel_find_panel_downclock
5609 (dev, fixed_mode, connector);
5610
5611 if (!downclock_mode) {
a1d26342 5612 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5613 return NULL;
5614 }
5615
96178eeb 5616 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5617
96178eeb 5618 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5619 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5620 return downclock_mode;
5621}
5622
ed92f0b2 5623static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5624 struct intel_connector *intel_connector)
ed92f0b2
PZ
5625{
5626 struct drm_connector *connector = &intel_connector->base;
5627 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5628 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5629 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5630 struct drm_i915_private *dev_priv = dev->dev_private;
5631 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5632 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5633 bool has_dpcd;
5634 struct drm_display_mode *scan;
5635 struct edid *edid;
6517d273 5636 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5637
5638 if (!is_edp(intel_dp))
5639 return true;
5640
49e6bc51
VS
5641 pps_lock(intel_dp);
5642 intel_edp_panel_vdd_sanitize(intel_dp);
5643 pps_unlock(intel_dp);
63635217 5644
ed92f0b2 5645 /* Cache DPCD and EDID for edp. */
ed92f0b2 5646 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5647
5648 if (has_dpcd) {
5649 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5650 dev_priv->no_aux_handshake =
5651 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5652 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5653 } else {
5654 /* if this fails, presume the device is a ghost */
5655 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5656 return false;
5657 }
5658
5659 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5660 pps_lock(intel_dp);
36b5f425 5661 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5662 pps_unlock(intel_dp);
ed92f0b2 5663
060c8778 5664 mutex_lock(&dev->mode_config.mutex);
0b99836f 5665 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5666 if (edid) {
5667 if (drm_add_edid_modes(connector, edid)) {
5668 drm_mode_connector_update_edid_property(connector,
5669 edid);
5670 drm_edid_to_eld(connector, edid);
5671 } else {
5672 kfree(edid);
5673 edid = ERR_PTR(-EINVAL);
5674 }
5675 } else {
5676 edid = ERR_PTR(-ENOENT);
5677 }
5678 intel_connector->edid = edid;
5679
5680 /* prefer fixed mode from EDID if available */
5681 list_for_each_entry(scan, &connector->probed_modes, head) {
5682 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5683 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5684 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5685 intel_connector, fixed_mode);
ed92f0b2
PZ
5686 break;
5687 }
5688 }
5689
5690 /* fallback to VBT if available for eDP */
5691 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5692 fixed_mode = drm_mode_duplicate(dev,
5693 dev_priv->vbt.lfp_lvds_vbt_mode);
5694 if (fixed_mode)
5695 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5696 }
060c8778 5697 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5698
01527b31
CT
5699 if (IS_VALLEYVIEW(dev)) {
5700 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5701 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5702
5703 /*
5704 * Figure out the current pipe for the initial backlight setup.
5705 * If the current pipe isn't valid, try the PPS pipe, and if that
5706 * fails just assume pipe A.
5707 */
5708 if (IS_CHERRYVIEW(dev))
5709 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5710 else
5711 pipe = PORT_TO_PIPE(intel_dp->DP);
5712
5713 if (pipe != PIPE_A && pipe != PIPE_B)
5714 pipe = intel_dp->pps_pipe;
5715
5716 if (pipe != PIPE_A && pipe != PIPE_B)
5717 pipe = PIPE_A;
5718
5719 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5720 pipe_name(pipe));
01527b31
CT
5721 }
5722
4f9db5b5 5723 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5724 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5725 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5726
5727 return true;
5728}
5729
16c25533 5730bool
f0fec3f2
PZ
5731intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5732 struct intel_connector *intel_connector)
a4fc5ed6 5733{
f0fec3f2
PZ
5734 struct drm_connector *connector = &intel_connector->base;
5735 struct intel_dp *intel_dp = &intel_dig_port->dp;
5736 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5737 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5738 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5739 enum port port = intel_dig_port->port;
0b99836f 5740 int type;
a4fc5ed6 5741
a4a5d2f8
VS
5742 intel_dp->pps_pipe = INVALID_PIPE;
5743
ec5b01dd 5744 /* intel_dp vfuncs */
b6b5e383
DL
5745 if (INTEL_INFO(dev)->gen >= 9)
5746 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5747 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5748 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5749 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5750 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5751 else if (HAS_PCH_SPLIT(dev))
5752 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5753 else
5754 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5755
b9ca5fad
DL
5756 if (INTEL_INFO(dev)->gen >= 9)
5757 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5758 else
5759 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5760
0767935e
DV
5761 /* Preserve the current hw state. */
5762 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5763 intel_dp->attached_connector = intel_connector;
3d3dc149 5764
3b32a35b 5765 if (intel_dp_is_edp(dev, port))
b329530c 5766 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5767 else
5768 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5769
f7d24902
ID
5770 /*
5771 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5772 * for DP the encoder type can be set by the caller to
5773 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5774 */
5775 if (type == DRM_MODE_CONNECTOR_eDP)
5776 intel_encoder->type = INTEL_OUTPUT_EDP;
5777
c17ed5b5
VS
5778 /* eDP only on port B and/or C on vlv/chv */
5779 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5780 port != PORT_B && port != PORT_C))
5781 return false;
5782
e7281eab
ID
5783 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5784 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5785 port_name(port));
5786
b329530c 5787 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5788 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5789
a4fc5ed6
KP
5790 connector->interlace_allowed = true;
5791 connector->doublescan_allowed = 0;
5792
f0fec3f2 5793 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5794 edp_panel_vdd_work);
a4fc5ed6 5795
df0e9248 5796 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5797 drm_connector_register(connector);
a4fc5ed6 5798
affa9354 5799 if (HAS_DDI(dev))
bcbc889b
PZ
5800 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5801 else
5802 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5803 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5804
0b99836f 5805 /* Set up the hotplug pin. */
ab9d7c30
PZ
5806 switch (port) {
5807 case PORT_A:
1d843f9d 5808 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5809 break;
5810 case PORT_B:
1d843f9d 5811 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5812 break;
5813 case PORT_C:
1d843f9d 5814 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5815 break;
5816 case PORT_D:
1d843f9d 5817 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5818 break;
5819 default:
ad1c0b19 5820 BUG();
5eb08b69
ZW
5821 }
5822
dada1a9f 5823 if (is_edp(intel_dp)) {
773538e8 5824 pps_lock(intel_dp);
1e74a324
VS
5825 intel_dp_init_panel_power_timestamps(intel_dp);
5826 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5827 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5828 else
36b5f425 5829 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5830 pps_unlock(intel_dp);
dada1a9f 5831 }
0095e6dc 5832
9d1a1031 5833 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5834
0e32b39c 5835 /* init MST on ports that can support it */
0c9b3715
JN
5836 if (HAS_DP_MST(dev) &&
5837 (port == PORT_B || port == PORT_C || port == PORT_D))
5838 intel_dp_mst_encoder_init(intel_dig_port,
5839 intel_connector->base.base.id);
0e32b39c 5840
36b5f425 5841 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5842 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5843 if (is_edp(intel_dp)) {
5844 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5845 /*
5846 * vdd might still be enabled due to the delayed vdd off.
5847 * Make sure vdd is actually turned off here.
5848 */
773538e8 5849 pps_lock(intel_dp);
4be73780 5850 edp_panel_vdd_off_sync(intel_dp);
773538e8 5851 pps_unlock(intel_dp);
15b1d171 5852 }
34ea3d38 5853 drm_connector_unregister(connector);
b2f246a8 5854 drm_connector_cleanup(connector);
16c25533 5855 return false;
b2f246a8 5856 }
32f9d658 5857
f684960e
CW
5858 intel_dp_add_properties(intel_dp, connector);
5859
a4fc5ed6
KP
5860 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5861 * 0xd. Failure to do so will result in spurious interrupts being
5862 * generated on the port when a cable is not attached.
5863 */
5864 if (IS_G4X(dev) && !IS_GM45(dev)) {
5865 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5866 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5867 }
16c25533 5868
aa7471d2
JN
5869 i915_debugfs_connector_add(connector);
5870
16c25533 5871 return true;
a4fc5ed6 5872}
f0fec3f2
PZ
5873
5874void
5875intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5876{
13cf5504 5877 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5878 struct intel_digital_port *intel_dig_port;
5879 struct intel_encoder *intel_encoder;
5880 struct drm_encoder *encoder;
5881 struct intel_connector *intel_connector;
5882
b14c5679 5883 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5884 if (!intel_dig_port)
5885 return;
5886
08d9bc92 5887 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5888 if (!intel_connector) {
5889 kfree(intel_dig_port);
5890 return;
5891 }
5892
5893 intel_encoder = &intel_dig_port->base;
5894 encoder = &intel_encoder->base;
5895
5896 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5897 DRM_MODE_ENCODER_TMDS);
5898
5bfe2ac0 5899 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5900 intel_encoder->disable = intel_disable_dp;
00c09d70 5901 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5902 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5903 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5904 if (IS_CHERRYVIEW(dev)) {
9197c88b 5905 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5906 intel_encoder->pre_enable = chv_pre_enable_dp;
5907 intel_encoder->enable = vlv_enable_dp;
580d3811 5908 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5909 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5910 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5911 intel_encoder->pre_enable = vlv_pre_enable_dp;
5912 intel_encoder->enable = vlv_enable_dp;
49277c31 5913 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5914 } else {
ecff4f3b
JN
5915 intel_encoder->pre_enable = g4x_pre_enable_dp;
5916 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5917 if (INTEL_INFO(dev)->gen >= 5)
5918 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5919 }
f0fec3f2 5920
174edf1f 5921 intel_dig_port->port = port;
f0fec3f2
PZ
5922 intel_dig_port->dp.output_reg = output_reg;
5923
00c09d70 5924 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5925 if (IS_CHERRYVIEW(dev)) {
5926 if (port == PORT_D)
5927 intel_encoder->crtc_mask = 1 << 2;
5928 else
5929 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5930 } else {
5931 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5932 }
bc079e8b 5933 intel_encoder->cloneable = 0;
f0fec3f2 5934
13cf5504 5935 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 5936 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 5937
15b1d171
PZ
5938 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5939 drm_encoder_cleanup(encoder);
5940 kfree(intel_dig_port);
b2f246a8 5941 kfree(intel_connector);
15b1d171 5942 }
f0fec3f2 5943}
0e32b39c
DA
5944
5945void intel_dp_mst_suspend(struct drm_device *dev)
5946{
5947 struct drm_i915_private *dev_priv = dev->dev_private;
5948 int i;
5949
5950 /* disable MST */
5951 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5952 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
5953 if (!intel_dig_port)
5954 continue;
5955
5956 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5957 if (!intel_dig_port->dp.can_mst)
5958 continue;
5959 if (intel_dig_port->dp.is_mst)
5960 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5961 }
5962 }
5963}
5964
5965void intel_dp_mst_resume(struct drm_device *dev)
5966{
5967 struct drm_i915_private *dev_priv = dev->dev_private;
5968 int i;
5969
5970 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5971 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
5972 if (!intel_dig_port)
5973 continue;
5974 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5975 int ret;
5976
5977 if (!intel_dig_port->dp.can_mst)
5978 continue;
5979
5980 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5981 if (ret != 0) {
5982 intel_dp_check_mst_status(&intel_dig_port->dp);
5983 }
5984 }
5985 }
5986}