/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides additional link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
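/*
 * Note (added for clarity): the rate tables below (gen9_rates,
 * default_rates) are expressed in kHz of the link symbol clock, i.e. the
 * per-lane bit rate divided by 10: 162000 kHz corresponds to 1.62 Gbps per
 * lane (RBR) and 540000 kHz to 5.4 Gbps per lane (HBR2).
 */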
/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
			/* WaDisableHBR2:skl */
			max_link_bw = DP_LINK_BW_2_7;
		else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
			  INTEL_INFO(dev)->gen >= 8) &&
			 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

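/*
 * The 8/10 factor below accounts for 8b/10b channel coding: only 80% of
 * the raw link bandwidth is available for pixel data.
 */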
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

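/*
 * The two helpers below move bytes between the AUX channel data registers
 * and a byte buffer. Bytes are packed MSB first; for example, packing the
 * two bytes { 0x12, 0x34 } yields 0x12340000, and unpacking that value
 * returns the same two bytes.
 */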
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power so that the T12 timing
   is guaranteed. Only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and the AUX clock
	 * should run at 2MHz. So take the hrawclk value and divide by 2.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

RV
803static int
804intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 805 const uint8_t *send, int send_bytes,
b84a1cf8
RV
806 uint8_t *recv, int recv_size)
807{
808 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
809 struct drm_device *dev = intel_dig_port->base.base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
812 uint32_t ch_data = ch_ctl + 4;
bc86625a 813 uint32_t aux_clock_divider;
b84a1cf8
RV
814 int i, ret, recv_bytes;
815 uint32_t status;
5ed12a19 816 int try, clock = 0;
4e6b788c 817 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
818 bool vdd;
819
773538e8 820 pps_lock(intel_dp);
e39b999a 821
72c3500a
VS
822 /*
823 * We will be called with VDD already enabled for dpcd/edid/oui reads.
824 * In such cases we want to leave VDD enabled and it's up to upper layers
825 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
826 * ourselves.
827 */
1e0560e0 828 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
829
830 /* dp aux is extremely sensitive to irq latency, hence request the
831 * lowest possible wakeup latency and so prevent the cpu from going into
832 * deep sleep states.
833 */
834 pm_qos_update_request(&dev_priv->pm_qos, 0);
835
836 intel_dp_check_edp(intel_dp);
5eb08b69 837
c67a470b
PZ
838 intel_aux_display_runtime_get(dev_priv);
839
11bee43e
JB
840 /* Try to wait for any previous AUX channel activity */
841 for (try = 0; try < 3; try++) {
ef04f00d 842 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
843 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
844 break;
845 msleep(1);
846 }
847
848 if (try == 3) {
849 WARN(1, "dp_aux_ch not started status 0x%08x\n",
850 I915_READ(ch_ctl));
9ee32fea
DV
851 ret = -EBUSY;
852 goto out;
4f7f7b7e
CW
853 }
854
46a5ae9f
PZ
855 /* Only 5 data registers! */
856 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
857 ret = -E2BIG;
858 goto out;
859 }
860
ec5b01dd 861 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
862 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
863 has_aux_irq,
864 send_bytes,
865 aux_clock_divider);
5ed12a19 866
bc86625a
CW
867 /* Must try at least 3 times according to DP spec */
868 for (try = 0; try < 5; try++) {
869 /* Load the send data into the aux channel data registers */
870 for (i = 0; i < send_bytes; i += 4)
871 I915_WRITE(ch_data + i,
a4f1289e
RV
872 intel_dp_pack_aux(send + i,
873 send_bytes - i));
bc86625a
CW
874
875 /* Send the command and wait for it to complete */
5ed12a19 876 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
877
878 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
879
880 /* Clear done status and any errors */
881 I915_WRITE(ch_ctl,
882 status |
883 DP_AUX_CH_CTL_DONE |
884 DP_AUX_CH_CTL_TIME_OUT_ERROR |
885 DP_AUX_CH_CTL_RECEIVE_ERROR);
886
887 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
888 DP_AUX_CH_CTL_RECEIVE_ERROR))
889 continue;
890 if (status & DP_AUX_CH_CTL_DONE)
891 break;
892 }
4f7f7b7e 893 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
894 break;
895 }
896
a4fc5ed6 897 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 898 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
899 ret = -EBUSY;
900 goto out;
a4fc5ed6
KP
901 }
902
903 /* Check for timeout or receive error.
904 * Timeouts occur when the sink is not connected
905 */
a5b3da54 906 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 907 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
908 ret = -EIO;
909 goto out;
a5b3da54 910 }
1ae8c0a5
KP
911
912 /* Timeouts occur when the device isn't connected, so they're
913 * "normal" -- don't fill the kernel log with these */
a5b3da54 914 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 915 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
916 ret = -ETIMEDOUT;
917 goto out;
a4fc5ed6
KP
918 }
919
920 /* Unload any bytes sent back from the other side */
921 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
922 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
923 if (recv_bytes > recv_size)
924 recv_bytes = recv_size;
0206e353 925
4f7f7b7e 926 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
927 intel_dp_unpack_aux(I915_READ(ch_data + i),
928 recv + i, recv_bytes - i);
a4fc5ed6 929
9ee32fea
DV
930 ret = recv_bytes;
931out:
932 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 933 intel_aux_display_runtime_put(dev_priv);
9ee32fea 934
884f19e9
JN
935 if (vdd)
936 edp_panel_vdd_off(intel_dp, false);
937
773538e8 938 pps_unlock(intel_dp);
e39b999a 939
9ee32fea 940 return ret;
a4fc5ed6
KP
941}
942
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
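/*
 * Note on the header assembled in intel_dp_aux_transfer() below: it is
 * four bytes, with the request type in the high nibble of byte 0, the low
 * 16 bits of the DPCD address in bytes 1-2, and the payload length minus
 * one in byte 3. Address-only transactions send only the first
 * BARE_ADDRESS_SIZE bytes and omit the length byte.
 */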
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = msg->request << 4;
	txbuf[1] = msg->address >> 8;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 1;

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			/* Return payload size. */
			ret = msg->size;
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

12f6a2e2 1144intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1145{
12f6a2e2
VS
1146 if (intel_dp->num_supported_rates) {
1147 *sink_rates = intel_dp->supported_rates;
ea2d8a42 1148 return intel_dp->num_supported_rates;
fc0f8e25 1149 }
12f6a2e2
VS
1150
1151 *sink_rates = default_rates;
1152
1153 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1154}
1155
a8f3ef61 1156static int
636280ba 1157intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
a8f3ef61
SJ
1158{
1159 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a8f3ef61 1160
636280ba
VS
1161 if (INTEL_INFO(dev)->gen >= 9) {
1162 *source_rates = gen9_rates;
1163 return ARRAY_SIZE(gen9_rates);
a8f3ef61 1164 }
636280ba
VS
1165
1166 *source_rates = default_rates;
1167
1168 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
a8f3ef61
SJ
1169}
1170
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

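/*
 * Intersect the source and sink rate lists. Both inputs are assumed to be
 * sorted in ascending order, so a single merge pass suffices; the common
 * rates are written to supported_rates and their count is returned.
 */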
static int intel_supported_rates(const int *source_rates, int source_len,
				 const int *sink_rates, int sink_len,
				 int *supported_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			supported_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

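/*
 * Link configuration search in the function below: walk candidate bpp
 * values from the requested pipe bpp down to 6 bpc, and for each bpp pick
 * the lowest common link rate and then the smallest lane count that can
 * carry the mode. For eDP the panel's maximum rate and lane count are used
 * directly.
 */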
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	const int *sink_rates;
	int supported_rates[8] = {0};
	const int *source_rates;
	int source_len, sink_len, supported_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);

	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	supported_len = intel_supported_rates(source_rates, source_len,
					      sink_rates, sink_len, supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	intel_dp->link_bw =
		drm_dp_link_rate_to_bw_code(supported_rates[clock]);

	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock], sink_rates);
		intel_dp->link_bw = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}

4be73780 1677static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1678{
30add22d 1679 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1680 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1681 struct intel_digital_port *intel_dig_port =
1682 dp_to_dig_port(intel_dp);
1683 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1684 enum intel_display_power_domain power_domain;
5d613501 1685 u32 pp;
453c5420 1686 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1687
e39b999a 1688 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1689
15e899a0 1690 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1691
15e899a0 1692 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1693 return;
b0665d57 1694
3936fcf4
VS
1695 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1696 port_name(intel_dig_port->port));
bd943159 1697
be2c9196
VS
1698 pp = ironlake_get_pp_control(intel_dp);
1699 pp &= ~EDP_FORCE_VDD;
453c5420 1700
be2c9196
VS
1701 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1702 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1703
be2c9196
VS
1704 I915_WRITE(pp_ctrl_reg, pp);
1705 POSTING_READ(pp_ctrl_reg);
90791a5c 1706
be2c9196
VS
1707 /* Make sure sequencer is idle before allowing subsequent activity */
1708 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1709 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1710
be2c9196
VS
1711 if ((pp & POWER_TARGET_ON) == 0)
1712 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1713
be2c9196
VS
1714 power_domain = intel_display_port_power_domain(intel_encoder);
1715 intel_display_power_put(dev_priv, power_domain);
bd943159 1716}
5d613501 1717
4be73780 1718static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1719{
1720 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1721 struct intel_dp, panel_vdd_work);
bd943159 1722
773538e8 1723 pps_lock(intel_dp);
15e899a0
VS
1724 if (!intel_dp->want_panel_vdd)
1725 edp_panel_vdd_off_sync(intel_dp);
773538e8 1726 pps_unlock(intel_dp);
bd943159
KP
1727}
1728
aba86890
ID
1729static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1730{
1731 unsigned long delay;
1732
1733 /*
1734 * Queue the timer to fire a long time from now (relative to the power
1735 * down delay) to keep the panel power up across a sequence of
1736 * operations.
1737 */
1738 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1739 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1740}
1741
951468f3
VS
1742/*
1743 * Must be paired with edp_panel_vdd_on().
1744 * Must hold pps_mutex around the whole on/off sequence.
1745 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1746 */
4be73780 1747static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1748{
e39b999a
VS
1749 struct drm_i915_private *dev_priv =
1750 intel_dp_to_dev(intel_dp)->dev_private;
1751
1752 lockdep_assert_held(&dev_priv->pps_mutex);
1753
97af61f5
KP
1754 if (!is_edp(intel_dp))
1755 return;
5d613501 1756
e2c719b7 1757 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1758 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1759
bd943159
KP
1760 intel_dp->want_panel_vdd = false;
1761
aba86890 1762 if (sync)
4be73780 1763 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1764 else
1765 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1766}
1767
9f0fb5be 1768static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1769{
30add22d 1770 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1771 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1772 u32 pp;
453c5420 1773 u32 pp_ctrl_reg;
9934c132 1774
9f0fb5be
VS
1775 lockdep_assert_held(&dev_priv->pps_mutex);
1776
97af61f5 1777 if (!is_edp(intel_dp))
bd943159 1778 return;
99ea7127 1779
3936fcf4
VS
1780 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1781 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1782
e7a89ace
VS
1783 if (WARN(edp_have_panel_power(intel_dp),
1784 "eDP port %c panel power already on\n",
1785 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1786 return;
9934c132 1787
4be73780 1788 wait_panel_power_cycle(intel_dp);
37c6c9b0 1789
bf13e81b 1790 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1791 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1792 if (IS_GEN5(dev)) {
1793 /* ILK workaround: disable reset around power sequence */
1794 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1795 I915_WRITE(pp_ctrl_reg, pp);
1796 POSTING_READ(pp_ctrl_reg);
05ce1a49 1797 }
37c6c9b0 1798
1c0ae80a 1799 pp |= POWER_TARGET_ON;
99ea7127
KP
1800 if (!IS_GEN5(dev))
1801 pp |= PANEL_POWER_RESET;
1802
453c5420
JB
1803 I915_WRITE(pp_ctrl_reg, pp);
1804 POSTING_READ(pp_ctrl_reg);
9934c132 1805
4be73780 1806 wait_panel_on(intel_dp);
dce56b3c 1807 intel_dp->last_power_on = jiffies;
9934c132 1808
05ce1a49
KP
1809 if (IS_GEN5(dev)) {
1810 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1811 I915_WRITE(pp_ctrl_reg, pp);
1812 POSTING_READ(pp_ctrl_reg);
05ce1a49 1813 }
9f0fb5be 1814}
e39b999a 1815
9f0fb5be
VS
1816void intel_edp_panel_on(struct intel_dp *intel_dp)
1817{
1818 if (!is_edp(intel_dp))
1819 return;
1820
1821 pps_lock(intel_dp);
1822 edp_panel_on(intel_dp);
773538e8 1823 pps_unlock(intel_dp);
9934c132
JB
1824}
1825
9f0fb5be
VS
1826
1827static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1828{
4e6e1a54
ID
1829 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1830 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1831 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1832 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1833 enum intel_display_power_domain power_domain;
99ea7127 1834 u32 pp;
453c5420 1835 u32 pp_ctrl_reg;
9934c132 1836
9f0fb5be
VS
1837 lockdep_assert_held(&dev_priv->pps_mutex);
1838
97af61f5
KP
1839 if (!is_edp(intel_dp))
1840 return;
37c6c9b0 1841
3936fcf4
VS
1842 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1843 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1844
3936fcf4
VS
1845 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1846 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1847
453c5420 1848 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
 1849	/* We need to switch off panel power _and_ force vdd; otherwise some
 1850	 * panels get very unhappy and cease to work. */
b3064154
PJ
1851 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1852 EDP_BLC_ENABLE);
453c5420 1853
bf13e81b 1854 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1855
849e39f5
PZ
1856 intel_dp->want_panel_vdd = false;
1857
453c5420
JB
1858 I915_WRITE(pp_ctrl_reg, pp);
1859 POSTING_READ(pp_ctrl_reg);
9934c132 1860
dce56b3c 1861 intel_dp->last_power_cycle = jiffies;
4be73780 1862 wait_panel_off(intel_dp);
849e39f5
PZ
1863
1864 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1865 power_domain = intel_display_port_power_domain(intel_encoder);
1866 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1867}
e39b999a 1868
9f0fb5be
VS
1869void intel_edp_panel_off(struct intel_dp *intel_dp)
1870{
1871 if (!is_edp(intel_dp))
1872 return;
e39b999a 1873
9f0fb5be
VS
1874 pps_lock(intel_dp);
1875 edp_panel_off(intel_dp);
773538e8 1876 pps_unlock(intel_dp);
9934c132
JB
1877}
1878
1250d107
JN
1879/* Enable backlight in the panel power control. */
1880static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1881{
da63a9f2
PZ
1882 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1883 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1884 struct drm_i915_private *dev_priv = dev->dev_private;
1885 u32 pp;
453c5420 1886 u32 pp_ctrl_reg;
32f9d658 1887
01cb9ea6
JB
1888 /*
1889 * If we enable the backlight right away following a panel power
1890 * on, we may see slight flicker as the panel syncs with the eDP
1891 * link. So delay a bit to make sure the image is solid before
1892 * allowing it to appear.
1893 */
4be73780 1894 wait_backlight_on(intel_dp);
e39b999a 1895
773538e8 1896 pps_lock(intel_dp);
e39b999a 1897
453c5420 1898 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1899 pp |= EDP_BLC_ENABLE;
453c5420 1900
bf13e81b 1901 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1902
1903 I915_WRITE(pp_ctrl_reg, pp);
1904 POSTING_READ(pp_ctrl_reg);
e39b999a 1905
773538e8 1906 pps_unlock(intel_dp);
32f9d658
ZW
1907}
1908
1250d107
JN
1909/* Enable backlight PWM and backlight PP control. */
1910void intel_edp_backlight_on(struct intel_dp *intel_dp)
1911{
1912 if (!is_edp(intel_dp))
1913 return;
1914
1915 DRM_DEBUG_KMS("\n");
1916
1917 intel_panel_enable_backlight(intel_dp->attached_connector);
1918 _intel_edp_backlight_on(intel_dp);
1919}
1920
1921/* Disable backlight in the panel power control. */
1922static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1923{
30add22d 1924 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1925 struct drm_i915_private *dev_priv = dev->dev_private;
1926 u32 pp;
453c5420 1927 u32 pp_ctrl_reg;
32f9d658 1928
f01eca2e
KP
1929 if (!is_edp(intel_dp))
1930 return;
1931
773538e8 1932 pps_lock(intel_dp);
e39b999a 1933
453c5420 1934 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1935 pp &= ~EDP_BLC_ENABLE;
453c5420 1936
bf13e81b 1937 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1938
1939 I915_WRITE(pp_ctrl_reg, pp);
1940 POSTING_READ(pp_ctrl_reg);
f7d2323c 1941
773538e8 1942 pps_unlock(intel_dp);
e39b999a
VS
1943
1944 intel_dp->last_backlight_off = jiffies;
f7d2323c 1945 edp_wait_backlight_off(intel_dp);
1250d107 1946}
f7d2323c 1947
1250d107
JN
1948/* Disable backlight PP control and backlight PWM. */
1949void intel_edp_backlight_off(struct intel_dp *intel_dp)
1950{
1951 if (!is_edp(intel_dp))
1952 return;
1953
1954 DRM_DEBUG_KMS("\n");
f7d2323c 1955
1250d107 1956 _intel_edp_backlight_off(intel_dp);
f7d2323c 1957 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1958}
a4fc5ed6 1959
73580fb7
JN
1960/*
1961 * Hook for controlling the panel power control backlight through the bl_power
1962 * sysfs attribute. Take care to handle multiple calls.
1963 */
1964static void intel_edp_backlight_power(struct intel_connector *connector,
1965 bool enable)
1966{
1967 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1968 bool is_enabled;
1969
773538e8 1970 pps_lock(intel_dp);
e39b999a 1971 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1972 pps_unlock(intel_dp);
73580fb7
JN
1973
1974 if (is_enabled == enable)
1975 return;
1976
23ba9373
JN
1977 DRM_DEBUG_KMS("panel power control backlight %s\n",
1978 enable ? "enable" : "disable");
73580fb7
JN
1979
1980 if (enable)
1981 _intel_edp_backlight_on(intel_dp);
1982 else
1983 _intel_edp_backlight_off(intel_dp);
1984}
1985
2bd2ad64 1986static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 1987{
da63a9f2
PZ
1988 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1989 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1990 struct drm_device *dev = crtc->dev;
d240f20f
JB
1991 struct drm_i915_private *dev_priv = dev->dev_private;
1992 u32 dpa_ctl;
1993
2bd2ad64
DV
1994 assert_pipe_disabled(dev_priv,
1995 to_intel_crtc(crtc)->pipe);
1996
d240f20f
JB
1997 DRM_DEBUG_KMS("\n");
1998 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1999 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2000 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2001
2002 /* We don't adjust intel_dp->DP while tearing down the link, to
2003 * facilitate link retraining (e.g. after hotplug). Hence clear all
2004 * enable bits here to ensure that we don't enable too much. */
2005 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2006 intel_dp->DP |= DP_PLL_ENABLE;
2007 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2008 POSTING_READ(DP_A);
2009 udelay(200);
d240f20f
JB
2010}
2011
2bd2ad64 2012static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2013{
da63a9f2
PZ
2014 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2015 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2016 struct drm_device *dev = crtc->dev;
d240f20f
JB
2017 struct drm_i915_private *dev_priv = dev->dev_private;
2018 u32 dpa_ctl;
2019
2bd2ad64
DV
2020 assert_pipe_disabled(dev_priv,
2021 to_intel_crtc(crtc)->pipe);
2022
d240f20f 2023 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2024 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2025 "dp pll off, should be on\n");
2026 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2027
2028 /* We can't rely on the value tracked for the DP register in
2029 * intel_dp->DP because link_down must not change that (otherwise link
 2030	 * re-training will fail). */
298b0b39 2031 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2032 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2033 POSTING_READ(DP_A);
d240f20f
JB
2034 udelay(200);
2035}
2036
c7ad3810 2037/* If the sink supports it, try to set the power state appropriately */
c19b0669 2038void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2039{
2040 int ret, i;
2041
2042 /* Should have a valid DPCD by this point */
2043 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2044 return;
2045
2046 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2047 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2048 DP_SET_POWER_D3);
c7ad3810
JB
2049 } else {
2050 /*
2051 * When turning on, we need to retry for 1ms to give the sink
2052 * time to wake up.
2053 */
2054 for (i = 0; i < 3; i++) {
9d1a1031
JN
2055 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2056 DP_SET_POWER_D0);
c7ad3810
JB
2057 if (ret == 1)
2058 break;
2059 msleep(1);
2060 }
2061 }
f9cac721
JN
2062
2063 if (ret != 1)
2064 DRM_DEBUG_KMS("failed to %s sink power state\n",
2065 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2066}
2067
19d8fe15
DV
2068static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2069 enum pipe *pipe)
d240f20f 2070{
19d8fe15 2071 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2072 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2073 struct drm_device *dev = encoder->base.dev;
2074 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2075 enum intel_display_power_domain power_domain;
2076 u32 tmp;
2077
2078 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2079 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2080 return false;
2081
2082 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2083
2084 if (!(tmp & DP_PORT_EN))
2085 return false;
2086
bc7d38a4 2087 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2088 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2089 } else if (IS_CHERRYVIEW(dev)) {
2090 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2091 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2092 *pipe = PORT_TO_PIPE(tmp);
2093 } else {
2094 u32 trans_sel;
2095 u32 trans_dp;
2096 int i;
2097
2098 switch (intel_dp->output_reg) {
2099 case PCH_DP_B:
2100 trans_sel = TRANS_DP_PORT_SEL_B;
2101 break;
2102 case PCH_DP_C:
2103 trans_sel = TRANS_DP_PORT_SEL_C;
2104 break;
2105 case PCH_DP_D:
2106 trans_sel = TRANS_DP_PORT_SEL_D;
2107 break;
2108 default:
2109 return true;
2110 }
2111
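		/*
		 * On CPT/PPT the DP port register doesn't encode the pipe,
		 * so scan the transcoder DP control registers to find which
		 * pipe has this port selected.
		 */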
055e393f 2112 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2113 trans_dp = I915_READ(TRANS_DP_CTL(i));
2114 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2115 *pipe = i;
2116 return true;
2117 }
2118 }
19d8fe15 2119
4a0833ec
DV
2120 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2121 intel_dp->output_reg);
2122 }
d240f20f 2123
19d8fe15
DV
2124 return true;
2125}
d240f20f 2126
045ac3b5 2127static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2128 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2129{
2130 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2131 u32 tmp, flags = 0;
63000ef6
XZ
2132 struct drm_device *dev = encoder->base.dev;
2133 struct drm_i915_private *dev_priv = dev->dev_private;
2134 enum port port = dp_to_dig_port(intel_dp)->port;
2135 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2136 int dotclock;
045ac3b5 2137
9ed109a7
DV
2138 tmp = I915_READ(intel_dp->output_reg);
2139 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2140 pipe_config->has_audio = true;
2141
63000ef6 2142 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2143 if (tmp & DP_SYNC_HS_HIGH)
2144 flags |= DRM_MODE_FLAG_PHSYNC;
2145 else
2146 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2147
63000ef6
XZ
2148 if (tmp & DP_SYNC_VS_HIGH)
2149 flags |= DRM_MODE_FLAG_PVSYNC;
2150 else
2151 flags |= DRM_MODE_FLAG_NVSYNC;
2152 } else {
2153 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2154 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2155 flags |= DRM_MODE_FLAG_PHSYNC;
2156 else
2157 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2158
63000ef6
XZ
2159 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2160 flags |= DRM_MODE_FLAG_PVSYNC;
2161 else
2162 flags |= DRM_MODE_FLAG_NVSYNC;
2163 }
045ac3b5 2164
2d112de7 2165 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2166
8c875fca
VS
2167 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2168 tmp & DP_COLOR_RANGE_16_235)
2169 pipe_config->limited_color_range = true;
2170
eb14cb74
VS
2171 pipe_config->has_dp_encoder = true;
2172
2173 intel_dp_get_m_n(crtc, pipe_config);
2174
18442d08 2175 if (port == PORT_A) {
f1f644dc
JB
2176 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2177 pipe_config->port_clock = 162000;
2178 else
2179 pipe_config->port_clock = 270000;
2180 }
18442d08
VS
2181
2182 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2183 &pipe_config->dp_m_n);
2184
2185 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2186 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2187
2d112de7 2188 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2189
c6cd2ee2
JN
2190 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2191 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2192 /*
2193 * This is a big fat ugly hack.
2194 *
2195 * Some machines in UEFI boot mode provide us a VBT that has 18
2196 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2197 * unknown we fail to light up. Yet the same BIOS boots up with
2198 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2199 * max, not what it tells us to use.
2200 *
2201 * Note: This will still be broken if the eDP panel is not lit
2202 * up by the BIOS, and thus we can't get the mode at module
2203 * load.
2204 */
2205 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2206 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2207 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2208 }
045ac3b5
JB
2209}
2210
e8cb4558 2211static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2212{
e8cb4558 2213 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2214 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2215 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2216
6e3c9717 2217 if (crtc->config->has_audio)
495a5bb8 2218 intel_audio_codec_disable(encoder);
6cb49835 2219
b32c6f48
RV
2220 if (HAS_PSR(dev) && !HAS_DDI(dev))
2221 intel_psr_disable(intel_dp);
2222
6cb49835
DV
2223 /* Make sure the panel is off before trying to change the mode. But also
2224 * ensure that we have vdd while we switch off the panel. */
24f3e092 2225 intel_edp_panel_vdd_on(intel_dp);
4be73780 2226 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2227 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2228 intel_edp_panel_off(intel_dp);
3739850b 2229
08aff3fe
VS
2230 /* disable the port before the pipe on g4x */
2231 if (INTEL_INFO(dev)->gen < 5)
3739850b 2232 intel_dp_link_down(intel_dp);
d240f20f
JB
2233}
2234
08aff3fe 2235static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2236{
2bd2ad64 2237 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2238 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2239
49277c31 2240 intel_dp_link_down(intel_dp);
08aff3fe
VS
2241 if (port == PORT_A)
2242 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2243}
2244
2245static void vlv_post_disable_dp(struct intel_encoder *encoder)
2246{
2247 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2248
2249 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2250}
2251
580d3811
VS
2252static void chv_post_disable_dp(struct intel_encoder *encoder)
2253{
2254 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2255 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2256 struct drm_device *dev = encoder->base.dev;
2257 struct drm_i915_private *dev_priv = dev->dev_private;
2258 struct intel_crtc *intel_crtc =
2259 to_intel_crtc(encoder->base.crtc);
2260 enum dpio_channel ch = vlv_dport_to_channel(dport);
2261 enum pipe pipe = intel_crtc->pipe;
2262 u32 val;
2263
2264 intel_dp_link_down(intel_dp);
2265
2266 mutex_lock(&dev_priv->dpio_lock);
2267
2268 /* Propagate soft reset to data lane reset */
97fd4d5c 2269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2270 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2271 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2272
97fd4d5c
VS
2273 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2274 val |= CHV_PCS_REQ_SOFTRESET_EN;
2275 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2276
2277 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2278 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2279 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2280
2281 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2282 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2283 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2284
2285 mutex_unlock(&dev_priv->dpio_lock);
2286}
2287
7b13b58a
VS
2288static void
2289_intel_dp_set_link_train(struct intel_dp *intel_dp,
2290 uint32_t *DP,
2291 uint8_t dp_train_pat)
2292{
2293 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2294 struct drm_device *dev = intel_dig_port->base.base.dev;
2295 struct drm_i915_private *dev_priv = dev->dev_private;
2296 enum port port = intel_dig_port->port;
2297
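	/*
	 * Translate the requested training pattern into the platform-specific
	 * encoding: DP_TP_CTL on DDI platforms, the CPT-specific bits when the
	 * port goes through the PCH transcoder, and the legacy DP register
	 * bits (with CHV handling for pattern 3) otherwise.
	 */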
2298 if (HAS_DDI(dev)) {
2299 uint32_t temp = I915_READ(DP_TP_CTL(port));
2300
2301 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2302 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2303 else
2304 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2305
2306 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2307 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2308 case DP_TRAINING_PATTERN_DISABLE:
2309 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2310
2311 break;
2312 case DP_TRAINING_PATTERN_1:
2313 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2314 break;
2315 case DP_TRAINING_PATTERN_2:
2316 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2317 break;
2318 case DP_TRAINING_PATTERN_3:
2319 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2320 break;
2321 }
2322 I915_WRITE(DP_TP_CTL(port), temp);
2323
2324 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2325 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2326
2327 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2328 case DP_TRAINING_PATTERN_DISABLE:
2329 *DP |= DP_LINK_TRAIN_OFF_CPT;
2330 break;
2331 case DP_TRAINING_PATTERN_1:
2332 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2333 break;
2334 case DP_TRAINING_PATTERN_2:
2335 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2336 break;
2337 case DP_TRAINING_PATTERN_3:
2338 DRM_ERROR("DP training pattern 3 not supported\n");
2339 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2340 break;
2341 }
2342
2343 } else {
2344 if (IS_CHERRYVIEW(dev))
2345 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2346 else
2347 *DP &= ~DP_LINK_TRAIN_MASK;
2348
2349 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2350 case DP_TRAINING_PATTERN_DISABLE:
2351 *DP |= DP_LINK_TRAIN_OFF;
2352 break;
2353 case DP_TRAINING_PATTERN_1:
2354 *DP |= DP_LINK_TRAIN_PAT_1;
2355 break;
2356 case DP_TRAINING_PATTERN_2:
2357 *DP |= DP_LINK_TRAIN_PAT_2;
2358 break;
2359 case DP_TRAINING_PATTERN_3:
2360 if (IS_CHERRYVIEW(dev)) {
2361 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2362 } else {
2363 DRM_ERROR("DP training pattern 3 not supported\n");
2364 *DP |= DP_LINK_TRAIN_PAT_2;
2365 }
2366 break;
2367 }
2368 }
2369}
2370
2371static void intel_dp_enable_port(struct intel_dp *intel_dp)
2372{
2373 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2374 struct drm_i915_private *dev_priv = dev->dev_private;
2375
7b13b58a
VS
2376 /* enable with pattern 1 (as per spec) */
2377 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2378 DP_TRAINING_PATTERN_1);
2379
2380 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2381 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2382
2383 /*
2384 * Magic for VLV/CHV. We _must_ first set up the register
2385 * without actually enabling the port, and then do another
2386 * write to enable the port. Otherwise link training will
2387 * fail when the power sequencer is freshly used for this port.
2388 */
2389 intel_dp->DP |= DP_PORT_EN;
2390
2391 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2392 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2393}
2394
e8cb4558 2395static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2396{
e8cb4558
DV
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2398 struct drm_device *dev = encoder->base.dev;
2399 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2400 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2401 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2402
0c33d8d7
DV
2403 if (WARN_ON(dp_reg & DP_PORT_EN))
2404 return;
5d613501 2405
093e3f13
VS
2406 pps_lock(intel_dp);
2407
2408 if (IS_VALLEYVIEW(dev))
2409 vlv_init_panel_power_sequencer(intel_dp);
2410
7b13b58a 2411 intel_dp_enable_port(intel_dp);
093e3f13
VS
2412
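	/*
	 * For eDP: force VDD on, power the panel up, then drop the VDD force
	 * now that the panel keeps its own power. These are no-ops for
	 * non-eDP outputs.
	 */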
2413 edp_panel_vdd_on(intel_dp);
2414 edp_panel_on(intel_dp);
2415 edp_panel_vdd_off(intel_dp, true);
2416
2417 pps_unlock(intel_dp);
2418
61234fa5
VS
2419 if (IS_VALLEYVIEW(dev))
2420 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2421
f01eca2e 2422 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2423 intel_dp_start_link_train(intel_dp);
33a34e4e 2424 intel_dp_complete_link_train(intel_dp);
3ab9c637 2425 intel_dp_stop_link_train(intel_dp);
c1dec79a 2426
6e3c9717 2427 if (crtc->config->has_audio) {
c1dec79a
JN
2428 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2429 pipe_name(crtc->pipe));
2430 intel_audio_codec_enable(encoder);
2431 }
ab1f90f9 2432}
89b667f8 2433
ecff4f3b
JN
2434static void g4x_enable_dp(struct intel_encoder *encoder)
2435{
828f5c6e
JN
2436 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2437
ecff4f3b 2438 intel_enable_dp(encoder);
4be73780 2439 intel_edp_backlight_on(intel_dp);
ab1f90f9 2440}
89b667f8 2441
ab1f90f9
JN
2442static void vlv_enable_dp(struct intel_encoder *encoder)
2443{
828f5c6e
JN
2444 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2445
4be73780 2446 intel_edp_backlight_on(intel_dp);
b32c6f48 2447 intel_psr_enable(intel_dp);
d240f20f
JB
2448}
2449
ecff4f3b 2450static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2451{
2452 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2453 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2454
8ac33ed3
DV
2455 intel_dp_prepare(encoder);
2456
d41f1efb
DV
2457 /* Only ilk+ has port A */
2458 if (dport->port == PORT_A) {
2459 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2460 ironlake_edp_pll_on(intel_dp);
d41f1efb 2461 }
ab1f90f9
JN
2462}
2463
83b84597
VS
2464static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2465{
2466 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2467 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2468 enum pipe pipe = intel_dp->pps_pipe;
2469 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2470
2471 edp_panel_vdd_off_sync(intel_dp);
2472
2473 /*
 2474	 * VLV seems to get confused when multiple power sequencers
 2475	 * have the same port selected (even if only one has power/vdd
 2476	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2477	 * CHV, on the other hand, doesn't seem to mind having the same port
 2478	 * selected in multiple power sequencers, but let's clear the
2479 * port select always when logically disconnecting a power sequencer
2480 * from a port.
2481 */
2482 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2483 pipe_name(pipe), port_name(intel_dig_port->port));
2484 I915_WRITE(pp_on_reg, 0);
2485 POSTING_READ(pp_on_reg);
2486
2487 intel_dp->pps_pipe = INVALID_PIPE;
2488}
2489
a4a5d2f8
VS
2490static void vlv_steal_power_sequencer(struct drm_device *dev,
2491 enum pipe pipe)
2492{
2493 struct drm_i915_private *dev_priv = dev->dev_private;
2494 struct intel_encoder *encoder;
2495
2496 lockdep_assert_held(&dev_priv->pps_mutex);
2497
ac3c12e4
VS
2498 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2499 return;
2500
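	/*
	 * Walk all eDP encoders and detach any that currently claim this
	 * pipe's power sequencer, so that we can take it over.
	 */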
a4a5d2f8
VS
2501 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2502 base.head) {
2503 struct intel_dp *intel_dp;
773538e8 2504 enum port port;
a4a5d2f8
VS
2505
2506 if (encoder->type != INTEL_OUTPUT_EDP)
2507 continue;
2508
2509 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2510 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2511
2512 if (intel_dp->pps_pipe != pipe)
2513 continue;
2514
2515 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2516 pipe_name(pipe), port_name(port));
a4a5d2f8 2517
034e43c6
VS
2518 WARN(encoder->connectors_active,
2519 "stealing pipe %c power sequencer from active eDP port %c\n",
2520 pipe_name(pipe), port_name(port));
a4a5d2f8 2521
a4a5d2f8 2522 /* make sure vdd is off before we steal it */
83b84597 2523 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2524 }
2525}
2526
2527static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2528{
2529 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2530 struct intel_encoder *encoder = &intel_dig_port->base;
2531 struct drm_device *dev = encoder->base.dev;
2532 struct drm_i915_private *dev_priv = dev->dev_private;
2533 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2534
2535 lockdep_assert_held(&dev_priv->pps_mutex);
2536
093e3f13
VS
2537 if (!is_edp(intel_dp))
2538 return;
2539
a4a5d2f8
VS
2540 if (intel_dp->pps_pipe == crtc->pipe)
2541 return;
2542
2543 /*
2544 * If another power sequencer was being used on this
2545 * port previously make sure to turn off vdd there while
2546 * we still have control of it.
2547 */
2548 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2549 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2550
2551 /*
2552 * We may be stealing the power
2553 * sequencer from another port.
2554 */
2555 vlv_steal_power_sequencer(dev, crtc->pipe);
2556
2557 /* now it's all ours */
2558 intel_dp->pps_pipe = crtc->pipe;
2559
2560 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2561 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2562
2563 /* init power sequencer on this pipe and port */
36b5f425
VS
2564 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2565 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2566}
2567
ab1f90f9 2568static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2569{
2bd2ad64 2570 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2571 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2572 struct drm_device *dev = encoder->base.dev;
89b667f8 2573 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2574 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2575 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2576 int pipe = intel_crtc->pipe;
2577 u32 val;
a4fc5ed6 2578
ab1f90f9 2579 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2580
ab3c759a 2581 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2582 val = 0;
2583 if (pipe)
2584 val |= (1<<21);
2585 else
2586 val &= ~(1<<21);
2587 val |= 0x001000c4;
ab3c759a
CML
2588 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2589 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2590 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2591
ab1f90f9
JN
2592 mutex_unlock(&dev_priv->dpio_lock);
2593
2594 intel_enable_dp(encoder);
89b667f8
JB
2595}
2596
ecff4f3b 2597static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2598{
2599 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2600 struct drm_device *dev = encoder->base.dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2602 struct intel_crtc *intel_crtc =
2603 to_intel_crtc(encoder->base.crtc);
e4607fcf 2604 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2605 int pipe = intel_crtc->pipe;
89b667f8 2606
8ac33ed3
DV
2607 intel_dp_prepare(encoder);
2608
89b667f8 2609 /* Program Tx lane resets to default */
0980a60f 2610 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2611 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2612 DPIO_PCS_TX_LANE2_RESET |
2613 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2614 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2615 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2616 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2617 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2618 DPIO_PCS_CLK_SOFT_RESET);
2619
2620 /* Fix up inter-pair skew failure */
ab3c759a
CML
2621 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2622 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2623 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2624 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2625}
2626
e4a1d846
CML
2627static void chv_pre_enable_dp(struct intel_encoder *encoder)
2628{
2629 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2630 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2631 struct drm_device *dev = encoder->base.dev;
2632 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2633 struct intel_crtc *intel_crtc =
2634 to_intel_crtc(encoder->base.crtc);
2635 enum dpio_channel ch = vlv_dport_to_channel(dport);
2636 int pipe = intel_crtc->pipe;
2637 int data, i;
949c1d43 2638 u32 val;
e4a1d846 2639
e4a1d846 2640 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2641
570e2a74
VS
2642 /* allow hardware to manage TX FIFO reset source */
2643 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2644 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2645 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2646
2647 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2648 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2649 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2650
949c1d43 2651	/* Deassert soft data lane reset */
97fd4d5c 2652 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2653 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2654 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2655
2656 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2657 val |= CHV_PCS_REQ_SOFTRESET_EN;
2658 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2659
2660 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2661 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2663
97fd4d5c 2664 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2665 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2666 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2667
 2668	/* Program Tx lane latency optimal setting */
e4a1d846
CML
2669 for (i = 0; i < 4; i++) {
2670 /* Set the latency optimal bit */
2671 data = (i == 1) ? 0x0 : 0x6;
2672 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2673 data << DPIO_FRC_LATENCY_SHFIT);
2674
2675 /* Set the upar bit */
2676 data = (i == 1) ? 0x0 : 0x1;
2677 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2678 data << DPIO_UPAR_SHIFT);
2679 }
2680
2681 /* Data lane stagger programming */
2682 /* FIXME: Fix up value only after power analysis */
2683
2684 mutex_unlock(&dev_priv->dpio_lock);
2685
e4a1d846 2686 intel_enable_dp(encoder);
e4a1d846
CML
2687}
2688
9197c88b
VS
2689static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2690{
2691 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2692 struct drm_device *dev = encoder->base.dev;
2693 struct drm_i915_private *dev_priv = dev->dev_private;
2694 struct intel_crtc *intel_crtc =
2695 to_intel_crtc(encoder->base.crtc);
2696 enum dpio_channel ch = vlv_dport_to_channel(dport);
2697 enum pipe pipe = intel_crtc->pipe;
2698 u32 val;
2699
625695f8
VS
2700 intel_dp_prepare(encoder);
2701
9197c88b
VS
2702 mutex_lock(&dev_priv->dpio_lock);
2703
b9e5ac3c
VS
2704 /* program left/right clock distribution */
2705 if (pipe != PIPE_B) {
2706 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2707 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2708 if (ch == DPIO_CH0)
2709 val |= CHV_BUFLEFTENA1_FORCE;
2710 if (ch == DPIO_CH1)
2711 val |= CHV_BUFRIGHTENA1_FORCE;
2712 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2713 } else {
2714 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2715 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2716 if (ch == DPIO_CH0)
2717 val |= CHV_BUFLEFTENA2_FORCE;
2718 if (ch == DPIO_CH1)
2719 val |= CHV_BUFRIGHTENA2_FORCE;
2720 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2721 }
2722
9197c88b
VS
2723 /* program clock channel usage */
2724 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2725 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2726 if (pipe != PIPE_B)
2727 val &= ~CHV_PCS_USEDCLKCHANNEL;
2728 else
2729 val |= CHV_PCS_USEDCLKCHANNEL;
2730 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2731
2732 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2733 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2734 if (pipe != PIPE_B)
2735 val &= ~CHV_PCS_USEDCLKCHANNEL;
2736 else
2737 val |= CHV_PCS_USEDCLKCHANNEL;
2738 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2739
2740 /*
 2741	 * This is a bit weird since generally CL
2742 * matches the pipe, but here we need to
2743 * pick the CL based on the port.
2744 */
2745 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2746 if (pipe != PIPE_B)
2747 val &= ~CHV_CMN_USEDCLKCHANNEL;
2748 else
2749 val |= CHV_CMN_USEDCLKCHANNEL;
2750 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2751
2752 mutex_unlock(&dev_priv->dpio_lock);
2753}
2754
a4fc5ed6 2755/*
df0c237d
JB
2756 * Native read with retry for link status and receiver capability reads for
2757 * cases where the sink may still be asleep.
9d1a1031
JN
2758 *
2759 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2760 * supposed to retry 3 times per the spec.
a4fc5ed6 2761 */
9d1a1031
JN
2762static ssize_t
2763intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2764 void *buffer, size_t size)
a4fc5ed6 2765{
9d1a1031
JN
2766 ssize_t ret;
2767 int i;
61da5fab 2768
f6a19066
VS
2769 /*
 2770	 * Sometimes we just get the same incorrect byte repeated
 2771	 * over the entire buffer. Doing just one throw-away read
2772 * initially seems to "solve" it.
2773 */
2774 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2775
61da5fab 2776 for (i = 0; i < 3; i++) {
9d1a1031
JN
2777 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2778 if (ret == size)
2779 return ret;
61da5fab
JB
2780 msleep(1);
2781 }
a4fc5ed6 2782
9d1a1031 2783 return ret;
a4fc5ed6
KP
2784}
2785
2786/*
2787 * Fetch AUX CH registers 0x202 - 0x207 which contain
2788 * link status information
2789 */
2790static bool
93f62dad 2791intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2792{
9d1a1031
JN
2793 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2794 DP_LANE0_1_STATUS,
2795 link_status,
2796 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2797}
2798
1100244e 2799/* These are source-specific values. */
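/*
 * intel_dp_voltage_max() returns the highest voltage swing level the source
 * can drive on this platform/port, and intel_dp_pre_emphasis_max() the
 * highest pre-emphasis level for a given swing; intel_get_adjust_train()
 * clamps the sink's requests to these limits.
 */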
a4fc5ed6 2800static uint8_t
1a2eb460 2801intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2802{
30add22d 2803 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2804 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2805 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2806
7ad14a29
SJ
2807 if (INTEL_INFO(dev)->gen >= 9) {
2808 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2809 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2810 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2811 } else if (IS_VALLEYVIEW(dev))
bd60018a 2812 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2813 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2814 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2815 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2816 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2817 else
bd60018a 2818 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2819}
2820
2821static uint8_t
2822intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2823{
30add22d 2824 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2825 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2826
5a9d1f1a
DL
2827 if (INTEL_INFO(dev)->gen >= 9) {
2828 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2829 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2830 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2831 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2832 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2833 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2834 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2835 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2836 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2837 default:
2838 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2839 }
2840 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2841 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2842 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2843 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2844 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2845 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2846 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2847 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2848 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2849 default:
bd60018a 2850 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2851 }
e2fa6fba
P
2852 } else if (IS_VALLEYVIEW(dev)) {
2853 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2855 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2857 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2859 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2861 default:
bd60018a 2862 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2863 }
bc7d38a4 2864 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2865 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2866 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2867 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2868 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2869 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2870 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2871 default:
bd60018a 2872 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2873 }
2874 } else {
2875 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2876 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2877 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2878 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2879 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2880 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2881 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2882 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2883 default:
bd60018a 2884 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2885 }
a4fc5ed6
KP
2886 }
2887}
2888
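/*
 * Map the negotiated voltage swing / pre-emphasis levels onto VLV DPIO PHY
 * register values and program them through the sideband interface.
 */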
e2fa6fba
P
2889static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2890{
2891 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2892 struct drm_i915_private *dev_priv = dev->dev_private;
2893 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2894 struct intel_crtc *intel_crtc =
2895 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2896 unsigned long demph_reg_value, preemph_reg_value,
2897 uniqtranscale_reg_value;
2898 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2899 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2900 int pipe = intel_crtc->pipe;
e2fa6fba
P
2901
2902 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2903 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2904 preemph_reg_value = 0x0004000;
2905 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2906 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2907 demph_reg_value = 0x2B405555;
2908 uniqtranscale_reg_value = 0x552AB83A;
2909 break;
bd60018a 2910 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2911 demph_reg_value = 0x2B404040;
2912 uniqtranscale_reg_value = 0x5548B83A;
2913 break;
bd60018a 2914 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2915 demph_reg_value = 0x2B245555;
2916 uniqtranscale_reg_value = 0x5560B83A;
2917 break;
bd60018a 2918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2919 demph_reg_value = 0x2B405555;
2920 uniqtranscale_reg_value = 0x5598DA3A;
2921 break;
2922 default:
2923 return 0;
2924 }
2925 break;
bd60018a 2926 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2927 preemph_reg_value = 0x0002000;
2928 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2930 demph_reg_value = 0x2B404040;
2931 uniqtranscale_reg_value = 0x5552B83A;
2932 break;
bd60018a 2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2934 demph_reg_value = 0x2B404848;
2935 uniqtranscale_reg_value = 0x5580B83A;
2936 break;
bd60018a 2937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2938 demph_reg_value = 0x2B404040;
2939 uniqtranscale_reg_value = 0x55ADDA3A;
2940 break;
2941 default:
2942 return 0;
2943 }
2944 break;
bd60018a 2945 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2946 preemph_reg_value = 0x0000000;
2947 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2949 demph_reg_value = 0x2B305555;
2950 uniqtranscale_reg_value = 0x5570B83A;
2951 break;
bd60018a 2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2953 demph_reg_value = 0x2B2B4040;
2954 uniqtranscale_reg_value = 0x55ADDA3A;
2955 break;
2956 default:
2957 return 0;
2958 }
2959 break;
bd60018a 2960 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2961 preemph_reg_value = 0x0006000;
2962 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2964 demph_reg_value = 0x1B405555;
2965 uniqtranscale_reg_value = 0x55ADDA3A;
2966 break;
2967 default:
2968 return 0;
2969 }
2970 break;
2971 default:
2972 return 0;
2973 }
2974
0980a60f 2975 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2976 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2977 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2978 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 2979 uniqtranscale_reg_value);
ab3c759a
CML
2980 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2981 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2982 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2983 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 2984 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
2985
2986 return 0;
2987}
2988
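/*
 * CHV counterpart of the above: compute de-emphasis and swing margin values
 * for the requested levels and program them into the CHV PHY.
 */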
e4a1d846
CML
2989static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2990{
2991 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2992 struct drm_i915_private *dev_priv = dev->dev_private;
2993 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2994 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 2995 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
2996 uint8_t train_set = intel_dp->train_set[0];
2997 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
2998 enum pipe pipe = intel_crtc->pipe;
2999 int i;
e4a1d846
CML
3000
3001 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3002 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3003 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3005 deemph_reg_value = 128;
3006 margin_reg_value = 52;
3007 break;
bd60018a 3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3009 deemph_reg_value = 128;
3010 margin_reg_value = 77;
3011 break;
bd60018a 3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3013 deemph_reg_value = 128;
3014 margin_reg_value = 102;
3015 break;
bd60018a 3016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3017 deemph_reg_value = 128;
3018 margin_reg_value = 154;
3019 /* FIXME extra to set for 1200 */
3020 break;
3021 default:
3022 return 0;
3023 }
3024 break;
bd60018a 3025 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3026 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3028 deemph_reg_value = 85;
3029 margin_reg_value = 78;
3030 break;
bd60018a 3031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3032 deemph_reg_value = 85;
3033 margin_reg_value = 116;
3034 break;
bd60018a 3035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3036 deemph_reg_value = 85;
3037 margin_reg_value = 154;
3038 break;
3039 default:
3040 return 0;
3041 }
3042 break;
bd60018a 3043 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3044 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3046 deemph_reg_value = 64;
3047 margin_reg_value = 104;
3048 break;
bd60018a 3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3050 deemph_reg_value = 64;
3051 margin_reg_value = 154;
3052 break;
3053 default:
3054 return 0;
3055 }
3056 break;
bd60018a 3057 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3058 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3060 deemph_reg_value = 43;
3061 margin_reg_value = 154;
3062 break;
3063 default:
3064 return 0;
3065 }
3066 break;
3067 default:
3068 return 0;
3069 }
3070
3071 mutex_lock(&dev_priv->dpio_lock);
3072
3073 /* Clear calc init */
1966e59e
VS
3074 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3075 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3076 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3077 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3078 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3079
3080 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3081 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3082 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3083 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3084 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3085
a02ef3c7
VS
3086 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3087 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3088 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3089 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3090
3091 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3092 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3093 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3094 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3095
e4a1d846 3096 /* Program swing deemph */
f72df8db
VS
3097 for (i = 0; i < 4; i++) {
3098 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3099 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3100 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3101 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3102 }
e4a1d846
CML
3103
3104 /* Program swing margin */
f72df8db
VS
3105 for (i = 0; i < 4; i++) {
3106 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3107 val &= ~DPIO_SWING_MARGIN000_MASK;
3108 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3109 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3110 }
e4a1d846
CML
3111
3112 /* Disable unique transition scale */
f72df8db
VS
3113 for (i = 0; i < 4; i++) {
3114 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3115 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3116 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3117 }
e4a1d846
CML
3118
3119 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3120 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3121 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3122 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3123
3124 /*
3125 * The document said it needs to set bit 27 for ch0 and bit 26
3126 * for ch1. Might be a typo in the doc.
3127 * For now, for this unique transition scale selection, set bit
3128 * 27 for ch0 and ch1.
3129 */
f72df8db
VS
3130 for (i = 0; i < 4; i++) {
3131 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3132 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3133 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3134 }
e4a1d846 3135
f72df8db
VS
3136 for (i = 0; i < 4; i++) {
3137 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3138 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3139 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3140 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3141 }
e4a1d846
CML
3142 }
3143
3144 /* Start swing calculation */
1966e59e
VS
3145 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3146 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3147 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3148
3149 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3150 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3151 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3152
3153 /* LRC Bypass */
3154 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3155 val |= DPIO_LRC_BYPASS;
3156 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3157
3158 mutex_unlock(&dev_priv->dpio_lock);
3159
3160 return 0;
3161}
3162
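/*
 * Pick the highest voltage swing and pre-emphasis requested by the sink
 * across all active lanes, clamp them to what this source can drive, and
 * set the MAX_*_REACHED flags so the sink knows we can't go any higher.
 */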
a4fc5ed6 3163static void
0301b3ac
JN
3164intel_get_adjust_train(struct intel_dp *intel_dp,
3165 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3166{
3167 uint8_t v = 0;
3168 uint8_t p = 0;
3169 int lane;
1a2eb460
KP
3170 uint8_t voltage_max;
3171 uint8_t preemph_max;
a4fc5ed6 3172
33a34e4e 3173 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3174 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3175 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3176
3177 if (this_v > v)
3178 v = this_v;
3179 if (this_p > p)
3180 p = this_p;
3181 }
3182
1a2eb460 3183 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3184 if (v >= voltage_max)
3185 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3186
1a2eb460
KP
3187 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3188 if (p >= preemph_max)
3189 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3190
3191 for (lane = 0; lane < 4; lane++)
33a34e4e 3192 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3193}
3194
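/*
 * Convert the DPCD-style train_set byte into the gen4 DP port register's
 * voltage swing and pre-emphasis bits.
 */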
3195static uint32_t
f0a3424e 3196intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3197{
3cf2efb1 3198 uint32_t signal_levels = 0;
a4fc5ed6 3199
3cf2efb1 3200 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3201 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3202 default:
3203 signal_levels |= DP_VOLTAGE_0_4;
3204 break;
bd60018a 3205 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3206 signal_levels |= DP_VOLTAGE_0_6;
3207 break;
bd60018a 3208 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3209 signal_levels |= DP_VOLTAGE_0_8;
3210 break;
bd60018a 3211 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3212 signal_levels |= DP_VOLTAGE_1_2;
3213 break;
3214 }
3cf2efb1 3215 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3216 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3217 default:
3218 signal_levels |= DP_PRE_EMPHASIS_0;
3219 break;
bd60018a 3220 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3221 signal_levels |= DP_PRE_EMPHASIS_3_5;
3222 break;
bd60018a 3223 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3224 signal_levels |= DP_PRE_EMPHASIS_6;
3225 break;
bd60018a 3226 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3227 signal_levels |= DP_PRE_EMPHASIS_9_5;
3228 break;
3229 }
3230 return signal_levels;
3231}
3232
e3421a18
ZW
3233/* Gen6's DP voltage swing and pre-emphasis control */
3234static uint32_t
3235intel_gen6_edp_signal_levels(uint8_t train_set)
3236{
3c5a62b5
YL
3237 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3238 DP_TRAIN_PRE_EMPHASIS_MASK);
3239 switch (signal_levels) {
bd60018a
SJ
3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3242 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3244 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3247 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3250 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3253 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3254 default:
3c5a62b5
YL
3255 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3256 "0x%x\n", signal_levels);
3257 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3258 }
3259}
3260
1a2eb460
KP
3261/* Gen7's DP voltage swing and pre-emphasis control */
3262static uint32_t
3263intel_gen7_edp_signal_levels(uint8_t train_set)
3264{
3265 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3266 DP_TRAIN_PRE_EMPHASIS_MASK);
3267 switch (signal_levels) {
bd60018a 3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3269 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3271 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3273 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3274
bd60018a 3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3276 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3278 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3279
bd60018a 3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3281 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3283 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3284
3285 default:
3286 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3287 "0x%x\n", signal_levels);
3288 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3289 }
3290}
3291
d6c0d722
PZ
3292/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3293static uint32_t
f0a3424e 3294intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3295{
d6c0d722
PZ
3296 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3297 DP_TRAIN_PRE_EMPHASIS_MASK);
3298 switch (signal_levels) {
bd60018a 3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3300 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3302 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3303 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3304 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3306 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3307
bd60018a 3308 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3309 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3311 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3313 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3314
bd60018a 3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3316 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3318 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3319
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3321 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3322 default:
3323 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3324 "0x%x\n", signal_levels);
c5fe6a06 3325 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3326 }
a4fc5ed6
KP
3327}
3328
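/*
 * Worked example (for reference, not driver code): on HSW/BDW the pair
 * (DP_TRAIN_VOLTAGE_SWING_LEVEL_1, DP_TRAIN_PRE_EMPH_LEVEL_2) in the switch
 * above yields DDI_BUF_TRANS_SELECT(6), i.e. the signal level is an index
 * into the DDI buffer translation table programmed by the DDI code, not a
 * raw voltage/pre-emphasis bit pattern as on gen4.
 */
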
f0a3424e
PZ
3329/* Properly updates "DP" with the correct signal levels. */
3330static void
3331intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3332{
3333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3334 enum port port = intel_dig_port->port;
f0a3424e
PZ
3335 struct drm_device *dev = intel_dig_port->base.base.dev;
3336 uint32_t signal_levels, mask;
3337 uint8_t train_set = intel_dp->train_set[0];
3338
5a9d1f1a 3339 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3340 signal_levels = intel_hsw_signal_levels(train_set);
3341 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3342 } else if (IS_CHERRYVIEW(dev)) {
3343 signal_levels = intel_chv_signal_levels(intel_dp);
3344 mask = 0;
e2fa6fba
P
3345 } else if (IS_VALLEYVIEW(dev)) {
3346 signal_levels = intel_vlv_signal_levels(intel_dp);
3347 mask = 0;
bc7d38a4 3348 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3349 signal_levels = intel_gen7_edp_signal_levels(train_set);
3350 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3351 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3352 signal_levels = intel_gen6_edp_signal_levels(train_set);
3353 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3354 } else {
3355 signal_levels = intel_gen4_signal_levels(train_set);
3356 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3357 }
3358
3359 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3360
3361 *DP = (*DP & ~mask) | signal_levels;
3362}
3363
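/*
 * Illustrative sketch (not part of the driver): the last line above is a
 * plain read-modify-write of the cached port register value; only the bits
 * covered by the platform mask change. The helper name and the gen4 sample
 * values below are assumptions made only for this example.
 */
static inline uint32_t example_apply_signal_levels(uint32_t dp_reg)
{
	uint32_t mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	uint32_t signal_levels = DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5;

	/* untouched bits of dp_reg (port enable, pipe select, ...) survive */
	return (dp_reg & ~mask) | signal_levels;
}
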
a4fc5ed6 3364static bool
ea5b213a 3365intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3366 uint32_t *DP,
58e10eb9 3367 uint8_t dp_train_pat)
a4fc5ed6 3368{
174edf1f
PZ
3369 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3370 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3371 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3372 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3373 int ret, len;
a4fc5ed6 3374
7b13b58a 3375 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3376
70aff66c 3377 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3378 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3379
2cdfe6c8
JN
3380 buf[0] = dp_train_pat;
3381 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3382 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3383 /* don't write DP_TRAINING_LANEx_SET on disable */
3384 len = 1;
3385 } else {
3386 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3387 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3388 len = intel_dp->lane_count + 1;
47ea7542 3389 }
a4fc5ed6 3390
9d1a1031
JN
3391 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3392 buf, len);
2cdfe6c8
JN
3393
3394 return ret == len;
a4fc5ed6
KP
3395}
3396
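/*
 * Layout note (for reference): the single drm_dp_dpcd_write() above relies
 * on DP_TRAINING_LANE0_SET..DP_TRAINING_LANE3_SET sitting directly after
 * DP_TRAINING_PATTERN_SET in the DPCD address map, so one buffer carries
 * the pattern plus the per-lane drive settings:
 *
 *	buf[0]           -> DP_TRAINING_PATTERN_SET
 *	buf[1..lane_cnt] -> DP_TRAINING_LANE0_SET .. DP_TRAINING_LANEn_SET
 *
 * For DP_TRAINING_PATTERN_DISABLE only buf[0] is written (len == 1).
 */
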
70aff66c
JN
3397static bool
3398intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3399 uint8_t dp_train_pat)
3400{
953d22e8 3401 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3402 intel_dp_set_signal_levels(intel_dp, DP);
3403 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3404}
3405
3406static bool
3407intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3408 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3409{
3410 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3411 struct drm_device *dev = intel_dig_port->base.base.dev;
3412 struct drm_i915_private *dev_priv = dev->dev_private;
3413 int ret;
3414
3415 intel_get_adjust_train(intel_dp, link_status);
3416 intel_dp_set_signal_levels(intel_dp, DP);
3417
3418 I915_WRITE(intel_dp->output_reg, *DP);
3419 POSTING_READ(intel_dp->output_reg);
3420
9d1a1031
JN
3421 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3422 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3423
3424 return ret == intel_dp->lane_count;
3425}
3426
3ab9c637
ID
3427static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3428{
3429 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3430 struct drm_device *dev = intel_dig_port->base.base.dev;
3431 struct drm_i915_private *dev_priv = dev->dev_private;
3432 enum port port = intel_dig_port->port;
3433 uint32_t val;
3434
3435 if (!HAS_DDI(dev))
3436 return;
3437
3438 val = I915_READ(DP_TP_CTL(port));
3439 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3440 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3441 I915_WRITE(DP_TP_CTL(port), val);
3442
3443 /*
3444 * On PORT_A we can have only eDP in SST mode. There, the only reason
3445 * we need to set idle transmission mode is to work around a HW issue
3446 * where we enable the pipe while not in idle link-training mode.
3447 * In this case there is a requirement to wait for a minimum number of
3448 * idle patterns to be sent.
3449 */
3450 if (port == PORT_A)
3451 return;
3452
3453 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3454 1))
3455 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3456}
3457
33a34e4e 3458/* Enable corresponding port and start training pattern 1 */
c19b0669 3459void
33a34e4e 3460intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3461{
da63a9f2 3462 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3463 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3464 int i;
3465 uint8_t voltage;
cdb0e95b 3466 int voltage_tries, loop_tries;
ea5b213a 3467 uint32_t DP = intel_dp->DP;
6aba5b6c 3468 uint8_t link_config[2];
a4fc5ed6 3469
affa9354 3470 if (HAS_DDI(dev))
c19b0669
PZ
3471 intel_ddi_prepare_link_retrain(encoder);
3472
3cf2efb1 3473 /* Write the link configuration data */
6aba5b6c
JN
3474 link_config[0] = intel_dp->link_bw;
3475 link_config[1] = intel_dp->lane_count;
3476 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3477 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3478 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
a8f3ef61
SJ
3479 if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
3480 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3481 &intel_dp->rate_select, 1);
6aba5b6c
JN
3482
3483 link_config[0] = 0;
3484 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3485 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3486
3487 DP |= DP_PORT_EN;
1a2eb460 3488
70aff66c
JN
3489 /* clock recovery */
3490 if (!intel_dp_reset_link_train(intel_dp, &DP,
3491 DP_TRAINING_PATTERN_1 |
3492 DP_LINK_SCRAMBLING_DISABLE)) {
3493 DRM_ERROR("failed to enable link training\n");
3494 return;
3495 }
3496
a4fc5ed6 3497 voltage = 0xff;
cdb0e95b
KP
3498 voltage_tries = 0;
3499 loop_tries = 0;
a4fc5ed6 3500 for (;;) {
70aff66c 3501 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3502
a7c9655f 3503 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3504 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3505 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3506 break;
93f62dad 3507 }
a4fc5ed6 3508
01916270 3509 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3510 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3511 break;
3512 }
3513
3514 /* Check to see if we've tried the max voltage */
3515 for (i = 0; i < intel_dp->lane_count; i++)
3516 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3517 break;
3b4f819d 3518 if (i == intel_dp->lane_count) {
b06fbda3
DV
3519 ++loop_tries;
3520 if (loop_tries == 5) {
3def84b3 3521 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3522 break;
3523 }
70aff66c
JN
3524 intel_dp_reset_link_train(intel_dp, &DP,
3525 DP_TRAINING_PATTERN_1 |
3526 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3527 voltage_tries = 0;
3528 continue;
3529 }
a4fc5ed6 3530
3cf2efb1 3531 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3532 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3533 ++voltage_tries;
b06fbda3 3534 if (voltage_tries == 5) {
3def84b3 3535 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3536 break;
3537 }
3538 } else
3539 voltage_tries = 0;
3540 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3541
70aff66c
JN
3542 /* Update training set as requested by target */
3543 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3544 DRM_ERROR("failed to update link training\n");
3545 break;
3546 }
a4fc5ed6
KP
3547 }
3548
33a34e4e
JB
3549 intel_dp->DP = DP;
3550}
3551
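/*
 * Typical caller sequence (shown only as an illustration of how the three
 * training stages fit together; see the retrain paths later in this file):
 *
 *	intel_dp_start_link_train(intel_dp);	 // clock recovery, TPS1
 *	intel_dp_complete_link_train(intel_dp);	 // channel EQ, TPS2/TPS3
 *	intel_dp_stop_link_train(intel_dp);	 // training pattern disable
 */
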
c19b0669 3552void
33a34e4e
JB
3553intel_dp_complete_link_train(struct intel_dp *intel_dp)
3554{
33a34e4e 3555 bool channel_eq = false;
37f80975 3556 int tries, cr_tries;
33a34e4e 3557 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3558 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3559
3560 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3561 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3562 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3563
a4fc5ed6 3564 /* channel equalization */
70aff66c 3565 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3566 training_pattern |
70aff66c
JN
3567 DP_LINK_SCRAMBLING_DISABLE)) {
3568 DRM_ERROR("failed to start channel equalization\n");
3569 return;
3570 }
3571
a4fc5ed6 3572 tries = 0;
37f80975 3573 cr_tries = 0;
a4fc5ed6
KP
3574 channel_eq = false;
3575 for (;;) {
70aff66c 3576 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3577
37f80975
JB
3578 if (cr_tries > 5) {
3579 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3580 break;
3581 }
3582
a7c9655f 3583 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3584 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3585 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3586 break;
70aff66c 3587 }
a4fc5ed6 3588
37f80975 3589 /* Make sure clock is still ok */
01916270 3590 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3591 intel_dp_start_link_train(intel_dp);
70aff66c 3592 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3593 training_pattern |
70aff66c 3594 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3595 cr_tries++;
3596 continue;
3597 }
3598
1ffdff13 3599 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3600 channel_eq = true;
3601 break;
3602 }
a4fc5ed6 3603
37f80975
JB
3604 /* Try 5 times, then try clock recovery if that fails */
3605 if (tries > 5) {
37f80975 3606 intel_dp_start_link_train(intel_dp);
70aff66c 3607 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3608 training_pattern |
70aff66c 3609 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3610 tries = 0;
3611 cr_tries++;
3612 continue;
3613 }
a4fc5ed6 3614
70aff66c
JN
3615 /* Update training set as requested by target */
3616 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3617 DRM_ERROR("failed to update link training\n");
3618 break;
3619 }
3cf2efb1 3620 ++tries;
869184a6 3621 }
3cf2efb1 3622
3ab9c637
ID
3623 intel_dp_set_idle_link_train(intel_dp);
3624
3625 intel_dp->DP = DP;
3626
d6c0d722 3627 if (channel_eq)
07f42258 3628 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3629
3ab9c637
ID
3630}
3631
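/*
 * Summary of the channel-equalization loop above (for reference): the
 * source switches to TPS2 (or TPS3 for HBR2 / TPS3-capable sinks), then
 * each pass waits the DPCD channel-eq delay and re-reads the lane status.
 * Losing clock recovery restarts the whole training, more than 5 EQ-only
 * failures also falls back to clock recovery, and after 5 such restarts
 * (cr_tries) the link is declared untrainable.
 */
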
3632void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3633{
70aff66c 3634 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3635 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3636}
3637
3638static void
ea5b213a 3639intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3640{
da63a9f2 3641 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3642 enum port port = intel_dig_port->port;
da63a9f2 3643 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3644 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3645 uint32_t DP = intel_dp->DP;
a4fc5ed6 3646
bc76e320 3647 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3648 return;
3649
0c33d8d7 3650 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3651 return;
3652
28c97730 3653 DRM_DEBUG_KMS("\n");
32f9d658 3654
bc7d38a4 3655 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3656 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3657 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3658 } else {
aad3d14d
VS
3659 if (IS_CHERRYVIEW(dev))
3660 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3661 else
3662 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3663 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3664 }
fe255d00 3665 POSTING_READ(intel_dp->output_reg);
5eb08b69 3666
493a7081 3667 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3668 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3669 /* Hardware workaround: leaving our transcoder select
3670 * set to transcoder B while it's off will prevent the
3671 * corresponding HDMI output on transcoder A.
3672 *
3673 * Combine this with another hardware workaround:
3674 * transcoder select bit can only be cleared while the
3675 * port is enabled.
3676 */
3677 DP &= ~DP_PIPEB_SELECT;
3678 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3679 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3680 }
3681
832afda6 3682 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3683 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3684 POSTING_READ(intel_dp->output_reg);
f01eca2e 3685 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3686}
3687
26d61aad
KP
3688static bool
3689intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3690{
a031d709
RV
3691 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3692 struct drm_device *dev = dig_port->base.base.dev;
3693 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3694 uint8_t rev;
a031d709 3695
9d1a1031
JN
3696 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3697 sizeof(intel_dp->dpcd)) < 0)
edb39244 3698 return false; /* aux transfer failed */
92fd8fd1 3699
a8e98153 3700 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3701
edb39244
AJ
3702 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3703 return false; /* DPCD not present */
3704
2293bb5c
SK
3705 /* Check if the panel supports PSR */
3706 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3707 if (is_edp(intel_dp)) {
9d1a1031
JN
3708 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3709 intel_dp->psr_dpcd,
3710 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3711 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3712 dev_priv->psr.sink_support = true;
50003939 3713 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3714 }
50003939
JN
3715 }
3716
7809a611 3717 /* Training Pattern 3 support, both source and sink */
06ea66b6 3718 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3719 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3720 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3721 intel_dp->use_tps3 = true;
f8d8a672 3722 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3723 } else
3724 intel_dp->use_tps3 = false;
3725
fc0f8e25
SJ
3726 /* Intermediate frequency support */
3727 if (is_edp(intel_dp) &&
3728 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3729 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3730 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3731 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3732 int i;
3733
fc0f8e25
SJ
3734 intel_dp_dpcd_read_wake(&intel_dp->aux,
3735 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3736 supported_rates,
3737 sizeof(supported_rates));
3738
3739 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3740 int val = le16_to_cpu(supported_rates[i]);
3741
3742 if (val == 0)
3743 break;
3744
3745 intel_dp->supported_rates[i] = val * 200;
3746 }
3747 intel_dp->num_supported_rates = i;
fc0f8e25 3748 }
edb39244
AJ
3749 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3750 DP_DWN_STRM_PORT_PRESENT))
3751 return true; /* native DP sink */
3752
3753 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3754 return true; /* no per-port downstream info */
3755
9d1a1031
JN
3756 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3757 intel_dp->downstream_ports,
3758 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3759 return false; /* downstream port status fetch failed */
3760
3761 return true;
92fd8fd1
KP
3762}
3763
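/*
 * Illustrative sketch (not part of the driver): DP_SUPPORTED_LINK_RATES
 * holds little-endian 16-bit entries in units of 200 kHz, terminated by 0,
 * which is exactly what the loop above converts. The helper name and the
 * sample value are assumptions made only for this example.
 */
static inline int example_link_rate_khz(__le16 raw)
{
	/* e.g. 8100 * 200 = 1620000 kHz, i.e. the 1.62 GHz RBR rate */
	return le16_to_cpu(raw) * 200;
}
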
0d198328
AJ
3764static void
3765intel_dp_probe_oui(struct intel_dp *intel_dp)
3766{
3767 u8 buf[3];
3768
3769 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3770 return;
3771
9d1a1031 3772 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3773 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3774 buf[0], buf[1], buf[2]);
3775
9d1a1031 3776 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3777 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3778 buf[0], buf[1], buf[2]);
3779}
3780
0e32b39c
DA
3781static bool
3782intel_dp_probe_mst(struct intel_dp *intel_dp)
3783{
3784 u8 buf[1];
3785
3786 if (!intel_dp->can_mst)
3787 return false;
3788
3789 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3790 return false;
3791
0e32b39c
DA
3792 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3793 if (buf[0] & DP_MST_CAP) {
3794 DRM_DEBUG_KMS("Sink is MST capable\n");
3795 intel_dp->is_mst = true;
3796 } else {
3797 DRM_DEBUG_KMS("Sink is not MST capable\n");
3798 intel_dp->is_mst = false;
3799 }
3800 }
0e32b39c
DA
3801
3802 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3803 return intel_dp->is_mst;
3804}
3805
d2e216d0
RV
3806int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3807{
3808 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3809 struct drm_device *dev = intel_dig_port->base.base.dev;
3810 struct intel_crtc *intel_crtc =
3811 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3812 u8 buf;
3813 int test_crc_count;
3814 int attempts = 6;
d2e216d0 3815
ad9dc91b 3816 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3817 return -EIO;
d2e216d0 3818
ad9dc91b 3819 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3820 return -ENOTTY;
3821
1dda5f93
RV
3822 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3823 return -EIO;
3824
9d1a1031 3825 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3826 buf | DP_TEST_SINK_START) < 0)
bda0381e 3827 return -EIO;
d2e216d0 3828
1dda5f93 3829 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3830 return -EIO;
ad9dc91b 3831 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3832
ad9dc91b 3833 do {
1dda5f93
RV
3834 if (drm_dp_dpcd_readb(&intel_dp->aux,
3835 DP_TEST_SINK_MISC, &buf) < 0)
3836 return -EIO;
ad9dc91b
RV
3837 intel_wait_for_vblank(dev, intel_crtc->pipe);
3838 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3839
3840 if (attempts == 0) {
90bd1f46
DV
3841 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3842 return -ETIMEDOUT;
ad9dc91b 3843 }
d2e216d0 3844
9d1a1031 3845 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3846 return -EIO;
d2e216d0 3847
1dda5f93
RV
3848 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3849 return -EIO;
3850 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3851 buf & ~DP_TEST_SINK_START) < 0)
3852 return -EIO;
ce31d9f4 3853
d2e216d0
RV
3854 return 0;
3855}
3856
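/*
 * Minimal usage sketch (illustrative only; in practice this helper is
 * driven from test/debug code rather than from the modeset paths):
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC %02x %02x %02x %02x %02x %02x\n",
 *			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
 */
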
a60f0e38
JB
3857static bool
3858intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3859{
9d1a1031
JN
3860 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3861 DP_DEVICE_SERVICE_IRQ_VECTOR,
3862 sink_irq_vector, 1) == 1;
a60f0e38
JB
3863}
3864
0e32b39c
DA
3865static bool
3866intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3867{
3868 int ret;
3869
3870 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3871 DP_SINK_COUNT_ESI,
3872 sink_irq_vector, 14);
3873 if (ret != 14)
3874 return false;
3875
3876 return true;
3877}
3878
a60f0e38
JB
3879static void
3880intel_dp_handle_test_request(struct intel_dp *intel_dp)
3881{
3882 /* NAK by default */
9d1a1031 3883 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3884}
3885
0e32b39c
DA
3886static int
3887intel_dp_check_mst_status(struct intel_dp *intel_dp)
3888{
3889 bool bret;
3890
3891 if (intel_dp->is_mst) {
3892 u8 esi[16] = { 0 };
3893 int ret = 0;
3894 int retry;
3895 bool handled;
3896 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3897go_again:
3898 if (bret == true) {
3899
3900 /* check link status - esi[10] = 0x200c */
3901 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3902 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3903 intel_dp_start_link_train(intel_dp);
3904 intel_dp_complete_link_train(intel_dp);
3905 intel_dp_stop_link_train(intel_dp);
3906 }
3907
6f34cc39 3908 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3909 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3910
3911 if (handled) {
3912 for (retry = 0; retry < 3; retry++) {
3913 int wret;
3914 wret = drm_dp_dpcd_write(&intel_dp->aux,
3915 DP_SINK_COUNT_ESI+1,
3916 &esi[1], 3);
3917 if (wret == 3) {
3918 break;
3919 }
3920 }
3921
3922 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3923 if (bret == true) {
6f34cc39 3924 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3925 goto go_again;
3926 }
3927 } else
3928 ret = 0;
3929
3930 return ret;
3931 } else {
3932 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3933 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3934 intel_dp->is_mst = false;
3935 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3936 /* send a hotplug event */
3937 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3938 }
3939 }
3940 return -EINVAL;
3941}
3942
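/*
 * Summary of the MST IRQ handling above (for reference): the sink's ESI
 * block (14 bytes at DP_SINK_COUNT_ESI) is read, channel EQ is re-checked
 * from esi[10..], drm_dp_mst_hpd_irq() processes any topology events, and
 * the serviced event bytes are acked by writing esi[1..3] back (up to 3
 * attempts) before looping to pick up further events.
 */
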
a4fc5ed6
KP
3943/*
3944 * According to DP spec
3945 * 5.1.2:
3946 * 1. Read DPCD
3947 * 2. Configure link according to Receiver Capabilities
3948 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3949 * 4. Check link status on receipt of hot-plug interrupt
3950 */
a5146200 3951static void
ea5b213a 3952intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3953{
5b215bcf 3954 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3955 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3956 u8 sink_irq_vector;
93f62dad 3957 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3958
5b215bcf
DA
3959 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3960
da63a9f2 3961 if (!intel_encoder->connectors_active)
d2b996ac 3962 return;
59cd09e1 3963
da63a9f2 3964 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3965 return;
3966
1a125d8a
ID
3967 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3968 return;
3969
92fd8fd1 3970 /* Try to read receiver status if the link appears to be up */
93f62dad 3971 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3972 return;
3973 }
3974
92fd8fd1 3975 /* Now read the DPCD to see if it's actually running */
26d61aad 3976 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
3977 return;
3978 }
3979
a60f0e38
JB
3980 /* Try to read the source of the interrupt */
3981 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3982 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3983 /* Clear interrupt source */
9d1a1031
JN
3984 drm_dp_dpcd_writeb(&intel_dp->aux,
3985 DP_DEVICE_SERVICE_IRQ_VECTOR,
3986 sink_irq_vector);
a60f0e38
JB
3987
3988 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3989 intel_dp_handle_test_request(intel_dp);
3990 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3991 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3992 }
3993
1ffdff13 3994 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 3995 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 3996 intel_encoder->base.name);
33a34e4e
JB
3997 intel_dp_start_link_train(intel_dp);
3998 intel_dp_complete_link_train(intel_dp);
3ab9c637 3999 intel_dp_stop_link_train(intel_dp);
33a34e4e 4000 }
a4fc5ed6 4001}
a4fc5ed6 4002
caf9ab24 4003/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4004static enum drm_connector_status
26d61aad 4005intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4006{
caf9ab24 4007 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4008 uint8_t type;
4009
4010 if (!intel_dp_get_dpcd(intel_dp))
4011 return connector_status_disconnected;
4012
4013 /* if there's no downstream port, we're done */
4014 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4015 return connector_status_connected;
caf9ab24
AJ
4016
4017 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4018 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4019 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4020 uint8_t reg;
9d1a1031
JN
4021
4022 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4023 &reg, 1) < 0)
caf9ab24 4024 return connector_status_unknown;
9d1a1031 4025
23235177
AJ
4026 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4027 : connector_status_disconnected;
caf9ab24
AJ
4028 }
4029
4030 /* If no HPD, poke DDC gently */
0b99836f 4031 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4032 return connector_status_connected;
caf9ab24
AJ
4033
4034 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4035 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4036 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4037 if (type == DP_DS_PORT_TYPE_VGA ||
4038 type == DP_DS_PORT_TYPE_NON_EDID)
4039 return connector_status_unknown;
4040 } else {
4041 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4042 DP_DWN_STRM_PORT_TYPE_MASK;
4043 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4044 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4045 return connector_status_unknown;
4046 }
caf9ab24
AJ
4047
4048 /* Anything else is out of spec, warn and ignore */
4049 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4050 return connector_status_disconnected;
71ba9000
AJ
4051}
4052
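/*
 * Detection summary for branch devices (for reference): with a DPCD 1.1+
 * hotplug-capable downstream port (DP_DS_PORT_HPD) the SINK_COUNT field
 * decides connected vs. disconnected; otherwise a gentle DDC probe is
 * tried, and only the unreliable port types (VGA, NON_EDID/OTHER) are
 * reported as unknown when that probe fails.
 */
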
d410b56d
CW
4053static enum drm_connector_status
4054edp_detect(struct intel_dp *intel_dp)
4055{
4056 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4057 enum drm_connector_status status;
4058
4059 status = intel_panel_detect(dev);
4060 if (status == connector_status_unknown)
4061 status = connector_status_connected;
4062
4063 return status;
4064}
4065
5eb08b69 4066static enum drm_connector_status
a9756bb5 4067ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4068{
30add22d 4069 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4070 struct drm_i915_private *dev_priv = dev->dev_private;
4071 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4072
1b469639
DL
4073 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4074 return connector_status_disconnected;
4075
26d61aad 4076 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4077}
4078
2a592bec
DA
4079static int g4x_digital_port_connected(struct drm_device *dev,
4080 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4081{
a4fc5ed6 4082 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4083 uint32_t bit;
5eb08b69 4084
232a6ee9
TP
4085 if (IS_VALLEYVIEW(dev)) {
4086 switch (intel_dig_port->port) {
4087 case PORT_B:
4088 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4089 break;
4090 case PORT_C:
4091 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4092 break;
4093 case PORT_D:
4094 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4095 break;
4096 default:
2a592bec 4097 return -EINVAL;
232a6ee9
TP
4098 }
4099 } else {
4100 switch (intel_dig_port->port) {
4101 case PORT_B:
4102 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4103 break;
4104 case PORT_C:
4105 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4106 break;
4107 case PORT_D:
4108 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4109 break;
4110 default:
2a592bec 4111 return -EINVAL;
232a6ee9 4112 }
a4fc5ed6
KP
4113 }
4114
10f76a38 4115 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4116 return 0;
4117 return 1;
4118}
4119
4120static enum drm_connector_status
4121g4x_dp_detect(struct intel_dp *intel_dp)
4122{
4123 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4124 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4125 int ret;
4126
4127 /* Can't disconnect eDP, but you can close the lid... */
4128 if (is_edp(intel_dp)) {
4129 enum drm_connector_status status;
4130
4131 status = intel_panel_detect(dev);
4132 if (status == connector_status_unknown)
4133 status = connector_status_connected;
4134 return status;
4135 }
4136
4137 ret = g4x_digital_port_connected(dev, intel_dig_port);
4138 if (ret == -EINVAL)
4139 return connector_status_unknown;
4140 else if (ret == 0)
a4fc5ed6
KP
4141 return connector_status_disconnected;
4142
26d61aad 4143 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4144}
4145
8c241fef 4146static struct edid *
beb60608 4147intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4148{
beb60608 4149 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4150
9cd300e0
JN
4151 /* use cached edid if we have one */
4152 if (intel_connector->edid) {
9cd300e0
JN
4153 /* invalid edid */
4154 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4155 return NULL;
4156
55e9edeb 4157 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4158 } else
4159 return drm_get_edid(&intel_connector->base,
4160 &intel_dp->aux.ddc);
4161}
8c241fef 4162
beb60608
CW
4163static void
4164intel_dp_set_edid(struct intel_dp *intel_dp)
4165{
4166 struct intel_connector *intel_connector = intel_dp->attached_connector;
4167 struct edid *edid;
8c241fef 4168
beb60608
CW
4169 edid = intel_dp_get_edid(intel_dp);
4170 intel_connector->detect_edid = edid;
4171
4172 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4173 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4174 else
4175 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4176}
4177
beb60608
CW
4178static void
4179intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4180{
beb60608 4181 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4182
beb60608
CW
4183 kfree(intel_connector->detect_edid);
4184 intel_connector->detect_edid = NULL;
9cd300e0 4185
beb60608
CW
4186 intel_dp->has_audio = false;
4187}
d6f24d0f 4188
beb60608
CW
4189static enum intel_display_power_domain
4190intel_dp_power_get(struct intel_dp *dp)
4191{
4192 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4193 enum intel_display_power_domain power_domain;
4194
4195 power_domain = intel_display_port_power_domain(encoder);
4196 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4197
4198 return power_domain;
4199}
d6f24d0f 4200
beb60608
CW
4201static void
4202intel_dp_power_put(struct intel_dp *dp,
4203 enum intel_display_power_domain power_domain)
4204{
4205 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4206 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4207}
4208
a9756bb5
ZW
4209static enum drm_connector_status
4210intel_dp_detect(struct drm_connector *connector, bool force)
4211{
4212 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4213 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4214 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4215 struct drm_device *dev = connector->dev;
a9756bb5 4216 enum drm_connector_status status;
671dedd2 4217 enum intel_display_power_domain power_domain;
0e32b39c 4218 bool ret;
a9756bb5 4219
164c8598 4220 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4221 connector->base.id, connector->name);
beb60608 4222 intel_dp_unset_edid(intel_dp);
164c8598 4223
0e32b39c
DA
4224 if (intel_dp->is_mst) {
4225 /* MST devices are disconnected from a monitor POV */
4226 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4227 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4228 return connector_status_disconnected;
0e32b39c
DA
4229 }
4230
beb60608 4231 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4232
d410b56d
CW
4233 /* Can't disconnect eDP, but you can close the lid... */
4234 if (is_edp(intel_dp))
4235 status = edp_detect(intel_dp);
4236 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4237 status = ironlake_dp_detect(intel_dp);
4238 else
4239 status = g4x_dp_detect(intel_dp);
4240 if (status != connector_status_connected)
c8c8fb33 4241 goto out;
a9756bb5 4242
0d198328
AJ
4243 intel_dp_probe_oui(intel_dp);
4244
0e32b39c
DA
4245 ret = intel_dp_probe_mst(intel_dp);
4246 if (ret) {
4247 /* If we are in MST mode then this connector
4248 won't appear connected and won't have an EDID of its own */
4249 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4250 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4251 status = connector_status_disconnected;
4252 goto out;
4253 }
4254
beb60608 4255 intel_dp_set_edid(intel_dp);
a9756bb5 4256
d63885da
PZ
4257 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4258 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4259 status = connector_status_connected;
4260
4261out:
beb60608 4262 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4263 return status;
a4fc5ed6
KP
4264}
4265
beb60608
CW
4266static void
4267intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4268{
df0e9248 4269 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4270 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4271 enum intel_display_power_domain power_domain;
a4fc5ed6 4272
beb60608
CW
4273 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4274 connector->base.id, connector->name);
4275 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4276
beb60608
CW
4277 if (connector->status != connector_status_connected)
4278 return;
671dedd2 4279
beb60608
CW
4280 power_domain = intel_dp_power_get(intel_dp);
4281
4282 intel_dp_set_edid(intel_dp);
4283
4284 intel_dp_power_put(intel_dp, power_domain);
4285
4286 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4287 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4288}
4289
4290static int intel_dp_get_modes(struct drm_connector *connector)
4291{
4292 struct intel_connector *intel_connector = to_intel_connector(connector);
4293 struct edid *edid;
4294
4295 edid = intel_connector->detect_edid;
4296 if (edid) {
4297 int ret = intel_connector_update_modes(connector, edid);
4298 if (ret)
4299 return ret;
4300 }
32f9d658 4301
f8779fda 4302 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4303 if (is_edp(intel_attached_dp(connector)) &&
4304 intel_connector->panel.fixed_mode) {
f8779fda 4305 struct drm_display_mode *mode;
beb60608
CW
4306
4307 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4308 intel_connector->panel.fixed_mode);
f8779fda 4309 if (mode) {
32f9d658
ZW
4310 drm_mode_probed_add(connector, mode);
4311 return 1;
4312 }
4313 }
beb60608 4314
32f9d658 4315 return 0;
a4fc5ed6
KP
4316}
4317
1aad7ac0
CW
4318static bool
4319intel_dp_detect_audio(struct drm_connector *connector)
4320{
1aad7ac0 4321 bool has_audio = false;
beb60608 4322 struct edid *edid;
1aad7ac0 4323
beb60608
CW
4324 edid = to_intel_connector(connector)->detect_edid;
4325 if (edid)
1aad7ac0 4326 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4327
1aad7ac0
CW
4328 return has_audio;
4329}
4330
f684960e
CW
4331static int
4332intel_dp_set_property(struct drm_connector *connector,
4333 struct drm_property *property,
4334 uint64_t val)
4335{
e953fd7b 4336 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4337 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4338 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4339 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4340 int ret;
4341
662595df 4342 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4343 if (ret)
4344 return ret;
4345
3f43c48d 4346 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4347 int i = val;
4348 bool has_audio;
4349
4350 if (i == intel_dp->force_audio)
f684960e
CW
4351 return 0;
4352
1aad7ac0 4353 intel_dp->force_audio = i;
f684960e 4354
c3e5f67b 4355 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4356 has_audio = intel_dp_detect_audio(connector);
4357 else
c3e5f67b 4358 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4359
4360 if (has_audio == intel_dp->has_audio)
f684960e
CW
4361 return 0;
4362
1aad7ac0 4363 intel_dp->has_audio = has_audio;
f684960e
CW
4364 goto done;
4365 }
4366
e953fd7b 4367 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4368 bool old_auto = intel_dp->color_range_auto;
4369 uint32_t old_range = intel_dp->color_range;
4370
55bc60db
VS
4371 switch (val) {
4372 case INTEL_BROADCAST_RGB_AUTO:
4373 intel_dp->color_range_auto = true;
4374 break;
4375 case INTEL_BROADCAST_RGB_FULL:
4376 intel_dp->color_range_auto = false;
4377 intel_dp->color_range = 0;
4378 break;
4379 case INTEL_BROADCAST_RGB_LIMITED:
4380 intel_dp->color_range_auto = false;
4381 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4382 break;
4383 default:
4384 return -EINVAL;
4385 }
ae4edb80
DV
4386
4387 if (old_auto == intel_dp->color_range_auto &&
4388 old_range == intel_dp->color_range)
4389 return 0;
4390
e953fd7b
CW
4391 goto done;
4392 }
4393
53b41837
YN
4394 if (is_edp(intel_dp) &&
4395 property == connector->dev->mode_config.scaling_mode_property) {
4396 if (val == DRM_MODE_SCALE_NONE) {
4397 DRM_DEBUG_KMS("no scaling not supported\n");
4398 return -EINVAL;
4399 }
4400
4401 if (intel_connector->panel.fitting_mode == val) {
4402 /* the eDP scaling property is not changed */
4403 return 0;
4404 }
4405 intel_connector->panel.fitting_mode = val;
4406
4407 goto done;
4408 }
4409
f684960e
CW
4410 return -EINVAL;
4411
4412done:
c0c36b94
CW
4413 if (intel_encoder->base.crtc)
4414 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4415
4416 return 0;
4417}
4418
a4fc5ed6 4419static void
73845adf 4420intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4421{
1d508706 4422 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4423
10e972d3 4424 kfree(intel_connector->detect_edid);
beb60608 4425
9cd300e0
JN
4426 if (!IS_ERR_OR_NULL(intel_connector->edid))
4427 kfree(intel_connector->edid);
4428
acd8db10
PZ
4429 /* Can't call is_edp() since the encoder may have been destroyed
4430 * already. */
4431 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4432 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4433
a4fc5ed6 4434 drm_connector_cleanup(connector);
55f78c43 4435 kfree(connector);
a4fc5ed6
KP
4436}
4437
00c09d70 4438void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4439{
da63a9f2
PZ
4440 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4441 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4442
4f71d0cb 4443 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4444 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4445 if (is_edp(intel_dp)) {
4446 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4447 /*
4448 * vdd might still be enabled due to the delayed vdd off.
4449 * Make sure vdd is actually turned off here.
4450 */
773538e8 4451 pps_lock(intel_dp);
4be73780 4452 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4453 pps_unlock(intel_dp);
4454
01527b31
CT
4455 if (intel_dp->edp_notifier.notifier_call) {
4456 unregister_reboot_notifier(&intel_dp->edp_notifier);
4457 intel_dp->edp_notifier.notifier_call = NULL;
4458 }
bd943159 4459 }
c8bd0e49 4460 drm_encoder_cleanup(encoder);
da63a9f2 4461 kfree(intel_dig_port);
24d05927
DV
4462}
4463
07f9cd0b
ID
4464static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4465{
4466 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4467
4468 if (!is_edp(intel_dp))
4469 return;
4470
951468f3
VS
4471 /*
4472 * vdd might still be enabled due to the delayed vdd off.
4473 * Make sure vdd is actually turned off here.
4474 */
afa4e53a 4475 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4476 pps_lock(intel_dp);
07f9cd0b 4477 edp_panel_vdd_off_sync(intel_dp);
773538e8 4478 pps_unlock(intel_dp);
07f9cd0b
ID
4479}
4480
49e6bc51
VS
4481static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4482{
4483 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4484 struct drm_device *dev = intel_dig_port->base.base.dev;
4485 struct drm_i915_private *dev_priv = dev->dev_private;
4486 enum intel_display_power_domain power_domain;
4487
4488 lockdep_assert_held(&dev_priv->pps_mutex);
4489
4490 if (!edp_have_panel_vdd(intel_dp))
4491 return;
4492
4493 /*
4494 * The VDD bit needs a power domain reference, so if the bit is
4495 * already enabled when we boot or resume, grab this reference and
4496 * schedule a vdd off, so we don't hold on to the reference
4497 * indefinitely.
4498 */
4499 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4500 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4501 intel_display_power_get(dev_priv, power_domain);
4502
4503 edp_panel_vdd_schedule_off(intel_dp);
4504}
4505
6d93c0c4
ID
4506static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4507{
49e6bc51
VS
4508 struct intel_dp *intel_dp;
4509
4510 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4511 return;
4512
4513 intel_dp = enc_to_intel_dp(encoder);
4514
4515 pps_lock(intel_dp);
4516
4517 /*
4518 * Read out the current power sequencer assignment,
4519 * in case the BIOS did something with it.
4520 */
4521 if (IS_VALLEYVIEW(encoder->dev))
4522 vlv_initial_power_sequencer_setup(intel_dp);
4523
4524 intel_edp_panel_vdd_sanitize(intel_dp);
4525
4526 pps_unlock(intel_dp);
6d93c0c4
ID
4527}
4528
a4fc5ed6 4529static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4530 .dpms = intel_connector_dpms,
a4fc5ed6 4531 .detect = intel_dp_detect,
beb60608 4532 .force = intel_dp_force,
a4fc5ed6 4533 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4534 .set_property = intel_dp_set_property,
2545e4a6 4535 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4536 .destroy = intel_dp_connector_destroy,
c6f95f27 4537 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4538};
4539
4540static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4541 .get_modes = intel_dp_get_modes,
4542 .mode_valid = intel_dp_mode_valid,
df0e9248 4543 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4544};
4545
a4fc5ed6 4546static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4547 .reset = intel_dp_encoder_reset,
24d05927 4548 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4549};
4550
0e32b39c 4551void
21d40d37 4552intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4553{
0e32b39c 4554 return;
c8110e52 4555}
6207937d 4556
b2c5c181 4557enum irqreturn
13cf5504
DA
4558intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4559{
4560 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4561 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4562 struct drm_device *dev = intel_dig_port->base.base.dev;
4563 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4564 enum intel_display_power_domain power_domain;
b2c5c181 4565 enum irqreturn ret = IRQ_NONE;
1c767b33 4566
0e32b39c
DA
4567 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4568 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4569
7a7f84cc
VS
4570 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4571 /*
4572 * vdd off can generate a long pulse on eDP which
4573 * would require vdd on to handle it, and thus we
4574 * would end up in an endless cycle of
4575 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4576 */
4577 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4578 port_name(intel_dig_port->port));
a8b3d52f 4579 return IRQ_HANDLED;
7a7f84cc
VS
4580 }
4581
26fbb774
VS
4582 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4583 port_name(intel_dig_port->port),
0e32b39c 4584 long_hpd ? "long" : "short");
13cf5504 4585
1c767b33
ID
4586 power_domain = intel_display_port_power_domain(intel_encoder);
4587 intel_display_power_get(dev_priv, power_domain);
4588
0e32b39c 4589 if (long_hpd) {
2a592bec
DA
4590
4591 if (HAS_PCH_SPLIT(dev)) {
4592 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4593 goto mst_fail;
4594 } else {
4595 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4596 goto mst_fail;
4597 }
0e32b39c
DA
4598
4599 if (!intel_dp_get_dpcd(intel_dp)) {
4600 goto mst_fail;
4601 }
4602
4603 intel_dp_probe_oui(intel_dp);
4604
4605 if (!intel_dp_probe_mst(intel_dp))
4606 goto mst_fail;
4607
4608 } else {
4609 if (intel_dp->is_mst) {
1c767b33 4610 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4611 goto mst_fail;
4612 }
4613
4614 if (!intel_dp->is_mst) {
4615 /*
4616 * we'll check the link status via the normal hot plug path later -
4617 * but for short hpds we should check it now
4618 */
5b215bcf 4619 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4620 intel_dp_check_link_status(intel_dp);
5b215bcf 4621 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4622 }
4623 }
b2c5c181
DV
4624
4625 ret = IRQ_HANDLED;
4626
1c767b33 4627 goto put_power;
0e32b39c
DA
4628mst_fail:
4629 /* if we were in MST mode and the device is no longer there, get out of MST mode */
4630 if (intel_dp->is_mst) {
4631 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4632 intel_dp->is_mst = false;
4633 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4634 }
1c767b33
ID
4635put_power:
4636 intel_display_power_put(dev_priv, power_domain);
4637
4638 return ret;
13cf5504
DA
4639}
4640
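/*
 * Summary of the HPD pulse handling above (for reference): long pulses
 * re-verify physical presence and DPCD/MST capability (dropping out of MST
 * mode on failure), short pulses service MST ESI interrupts or, for SST,
 * re-check the link status under connection_mutex; long pulses on eDP are
 * ignored to avoid the vdd-off -> long-hpd -> vdd-on loop described above.
 */
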
e3421a18
ZW
4641/* Return which DP Port should be selected for Transcoder DP control */
4642int
0206e353 4643intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4644{
4645 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4646 struct intel_encoder *intel_encoder;
4647 struct intel_dp *intel_dp;
e3421a18 4648
fa90ecef
PZ
4649 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4650 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4651
fa90ecef
PZ
4652 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4653 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4654 return intel_dp->output_reg;
e3421a18 4655 }
ea5b213a 4656
e3421a18
ZW
4657 return -1;
4658}
4659
36e83a18 4660/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4661bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4662{
4663 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4664 union child_device_config *p_child;
36e83a18 4665 int i;
5d8a7752
VS
4666 static const short port_mapping[] = {
4667 [PORT_B] = PORT_IDPB,
4668 [PORT_C] = PORT_IDPC,
4669 [PORT_D] = PORT_IDPD,
4670 };
36e83a18 4671
3b32a35b
VS
4672 if (port == PORT_A)
4673 return true;
4674
41aa3448 4675 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4676 return false;
4677
41aa3448
RV
4678 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4679 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4680
5d8a7752 4681 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4682 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4683 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4684 return true;
4685 }
4686 return false;
4687}
4688
0e32b39c 4689void
f684960e
CW
4690intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4691{
53b41837
YN
4692 struct intel_connector *intel_connector = to_intel_connector(connector);
4693
3f43c48d 4694 intel_attach_force_audio_property(connector);
e953fd7b 4695 intel_attach_broadcast_rgb_property(connector);
55bc60db 4696 intel_dp->color_range_auto = true;
53b41837
YN
4697
4698 if (is_edp(intel_dp)) {
4699 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4700 drm_object_attach_property(
4701 &connector->base,
53b41837 4702 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4703 DRM_MODE_SCALE_ASPECT);
4704 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4705 }
f684960e
CW
4706}
4707
dada1a9f
ID
4708static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4709{
4710 intel_dp->last_power_cycle = jiffies;
4711 intel_dp->last_power_on = jiffies;
4712 intel_dp->last_backlight_off = jiffies;
4713}
4714
67a54566
DV
4715static void
4716intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4717 struct intel_dp *intel_dp)
67a54566
DV
4718{
4719 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4720 struct edp_power_seq cur, vbt, spec,
4721 *final = &intel_dp->pps_delays;
67a54566 4722 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4723 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4724
e39b999a
VS
4725 lockdep_assert_held(&dev_priv->pps_mutex);
4726
81ddbc69
VS
4727 /* already initialized? */
4728 if (final->t11_t12 != 0)
4729 return;
4730
453c5420 4731 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4732 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4733 pp_on_reg = PCH_PP_ON_DELAYS;
4734 pp_off_reg = PCH_PP_OFF_DELAYS;
4735 pp_div_reg = PCH_PP_DIVISOR;
4736 } else {
bf13e81b
JN
4737 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4738
4739 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4740 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4741 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4742 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4743 }
67a54566
DV
4744
4745 /* Workaround: Need to write PP_CONTROL with the unlock key as
4746 * the very first thing. */
453c5420 4747 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4748 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4749
453c5420
JB
4750 pp_on = I915_READ(pp_on_reg);
4751 pp_off = I915_READ(pp_off_reg);
4752 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4753
4754 /* Pull timing values out of registers */
4755 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4756 PANEL_POWER_UP_DELAY_SHIFT;
4757
4758 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4759 PANEL_LIGHT_ON_DELAY_SHIFT;
4760
4761 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4762 PANEL_LIGHT_OFF_DELAY_SHIFT;
4763
4764 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4765 PANEL_POWER_DOWN_DELAY_SHIFT;
4766
4767 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4768 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4769
4770 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4771 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4772
41aa3448 4773 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4774
4775 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4776 * our hw here, which are all in 100usec. */
4777 spec.t1_t3 = 210 * 10;
4778 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4779 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4780 spec.t10 = 500 * 10;
4781 /* This one is special and actually in units of 100ms, but zero
4782 * based in the hw (so we need to add 100 ms). But the sw vbt
4783 * table multiplies it by 1000 to make it in units of 100usec,
4784 * too. */
4785 spec.t11_t12 = (510 + 100) * 10;
4786
4787 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4788 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4789
4790 /* Use the max of the register settings and vbt. If both are
4791 * unset, fall back to the spec limits. */
36b5f425 4792#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4793 spec.field : \
4794 max(cur.field, vbt.field))
4795 assign_final(t1_t3);
4796 assign_final(t8);
4797 assign_final(t9);
4798 assign_final(t10);
4799 assign_final(t11_t12);
4800#undef assign_final
4801
36b5f425 4802#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4803 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4804 intel_dp->backlight_on_delay = get_delay(t8);
4805 intel_dp->backlight_off_delay = get_delay(t9);
4806 intel_dp->panel_power_down_delay = get_delay(t10);
4807 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4808#undef get_delay
4809
f30d26e4
JN
4810 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4811 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4812 intel_dp->panel_power_cycle_delay);
4813
4814 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4815 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4816}
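/*
 * Illustrative sketch (not part of the driver): how the cur/vbt/spec
 * resolution and the 100 us -> ms conversion performed above behave for a
 * single field.  Standalone userspace C; the example values and the
 * MAX()/DIV_ROUND_UP() stand-ins are hypothetical, not the kernel macros.
 */
#include <stdio.h>

#define MAX(a, b)		((a) > (b) ? (a) : (b))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cur_t1_t3 = 0;		/* hw registers had nothing programmed */
	unsigned int vbt_t1_t3 = 400;		/* VBT: 40 ms, stored in 100 us units */
	unsigned int spec_t1_t3 = 210 * 10;	/* eDP 1.3 upper limit */

	/* max of register and VBT values; fall back to spec if both are zero */
	unsigned int final = MAX(cur_t1_t3, vbt_t1_t3);
	if (final == 0)
		final = spec_t1_t3;

	/* get_delay(): convert from 100 us units to ms, rounding up */
	printf("panel_power_up_delay = %u ms\n", DIV_ROUND_UP(final, 10));
	return 0;
}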
4817
4818static void
4819intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4820 struct intel_dp *intel_dp)
f30d26e4
JN
4821{
4822 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4823 u32 pp_on, pp_off, pp_div, port_sel = 0;
4824 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4825 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4826 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4827 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4828
e39b999a 4829 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4830
4831 if (HAS_PCH_SPLIT(dev)) {
4832 pp_on_reg = PCH_PP_ON_DELAYS;
4833 pp_off_reg = PCH_PP_OFF_DELAYS;
4834 pp_div_reg = PCH_PP_DIVISOR;
4835 } else {
bf13e81b
JN
4836 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4837
4838 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4839 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4840 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4841 }
4842
b2f19d1a
PZ
4843 /*
4844 * And finally store the new values in the power sequencer. The
4845 * backlight delays are set to 1 because we do manual waits on them. For
4846 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4847 * we'll end up waiting for the backlight off delay twice: once when we
4848 * do the manual sleep, and once when we disable the panel and wait for
4849 * the PP_STATUS bit to become zero.
4850 */
f30d26e4 4851 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4852 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4853 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4854 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4855 /* Compute the divisor for the pp clock, simply match the Bspec
4856 * formula. */
453c5420 4857 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4858 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4859 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4860
4861 /* Haswell doesn't have any port selection bits for the panel
4862 * power sequencer any more. */
bc7d38a4 4863 if (IS_VALLEYVIEW(dev)) {
ad933b56 4864 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4865 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4866 if (port == PORT_A)
a24c144c 4867 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4868 else
a24c144c 4869 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4870 }
4871
453c5420
JB
4872 pp_on |= port_sel;
4873
4874 I915_WRITE(pp_on_reg, pp_on);
4875 I915_WRITE(pp_off_reg, pp_off);
4876 I915_WRITE(pp_div_reg, pp_div);
67a54566 4877
67a54566 4878 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4879 I915_READ(pp_on_reg),
4880 I915_READ(pp_off_reg),
4881 I915_READ(pp_div_reg));
f684960e
CW
4882}
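/*
 * Illustrative sketch (not part of the driver): the PP_DIVISOR packing done
 * above, as standalone userspace C.  The shift values and the raw clock
 * below are hypothetical stand-ins for the i915_reg.h definitions and for
 * intel_pch_rawclk()/intel_hrawclk().
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define PP_REFERENCE_DIVIDER_SHIFT	8	/* hypothetical stand-in */
#define PANEL_POWER_CYCLE_DELAY_SHIFT	0	/* hypothetical stand-in */

int main(void)
{
	unsigned int div = 125;		/* hypothetical raw clock value */
	unsigned int t11_t12 = 6100;	/* 610 ms power cycle delay, in 100 us units */
	unsigned int pp_div;

	/* reference divider, matching the Bspec-style formula in the code above */
	pp_div = ((100 * div) / 2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	/* the power cycle delay field is programmed in 100 ms units */
	pp_div |= DIV_ROUND_UP(t11_t12, 1000) << PANEL_POWER_CYCLE_DELAY_SHIFT;

	printf("pp_div = %#x (power cycle delay field = %u)\n",
	       pp_div, DIV_ROUND_UP(t11_t12, 1000));
	return 0;
}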
4883
b33a2815
VK
4884/**
4885 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4886 * @dev: DRM device
4887 * @refresh_rate: RR to be programmed
4888 *
4889 * This function gets called when refresh rate (RR) has to be changed from
4890 * one frequency to another. Switches can be between high and low RR
4891 * supported by the panel or to any other RR based on media playback (in
4892 * this case, RR value needs to be passed from user space).
4893 *
4894 * The caller of this function needs to take a lock on dev_priv->drrs.
4895 */
96178eeb 4896static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4897{
4898 struct drm_i915_private *dev_priv = dev->dev_private;
4899 struct intel_encoder *encoder;
96178eeb
VK
4900 struct intel_digital_port *dig_port = NULL;
4901 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4902 struct intel_crtc_state *config = NULL;
439d7ac0 4903 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4904 u32 reg, val;
96178eeb 4905 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4906
4907 if (refresh_rate <= 0) {
4908 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4909 return;
4910 }
4911
96178eeb
VK
4912 if (intel_dp == NULL) {
4913 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4914 return;
4915 }
4916
1fcc9d1c 4917 /*
e4d59f6b
RV
4918 * FIXME: This needs proper synchronization with psr state for some
4919 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4920 */
439d7ac0 4921
96178eeb
VK
4922 dig_port = dp_to_dig_port(intel_dp);
4923 encoder = &dig_port->base;
439d7ac0
PB
4924 intel_crtc = encoder->new_crtc;
4925
4926 if (!intel_crtc) {
4927 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4928 return;
4929 }
4930
6e3c9717 4931 config = intel_crtc->config;
439d7ac0 4932
96178eeb 4933 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4934 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4935 return;
4936 }
4937
96178eeb
VK
4938 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4939 refresh_rate)
439d7ac0
PB
4940 index = DRRS_LOW_RR;
4941
96178eeb 4942 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4943 DRM_DEBUG_KMS(
4944 "DRRS requested for previously set RR...ignoring\n");
4945 return;
4946 }
4947
4948 if (!intel_crtc->active) {
4949 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4950 return;
4951 }
4952
44395bfe 4953 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4954 switch (index) {
4955 case DRRS_HIGH_RR:
4956 intel_dp_set_m_n(intel_crtc, M1_N1);
4957 break;
4958 case DRRS_LOW_RR:
4959 intel_dp_set_m_n(intel_crtc, M2_N2);
4960 break;
4961 case DRRS_MAX_RR:
4962 default:
 4963 DRM_ERROR("Unsupported refresh rate type\n");
4964 }
4965 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4966 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4967 val = I915_READ(reg);
a4c30b1d 4968
439d7ac0 4969 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4970 if (IS_VALLEYVIEW(dev))
4971 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4972 else
4973 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4974 } else {
6fa7aec1
VK
4975 if (IS_VALLEYVIEW(dev))
4976 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4977 else
4978 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
4979 }
4980 I915_WRITE(reg, val);
4981 }
4982
4e9ac947
VK
4983 dev_priv->drrs.refresh_rate_type = index;
4984
4985 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4986}
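/*
 * Illustrative sketch (not part of the driver): the calling convention for
 * intel_dp_set_drrs_state().  Per the kernel-doc above, the caller holds
 * dev_priv->drrs.mutex and passes a refresh rate taken from the panel's
 * fixed or downclock mode, e.g.:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev,
 *				intel_dp->attached_connector->panel.
 *				downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 *
 * intel_edp_drrs_disable() and intel_edp_drrs_downclock_work() below follow
 * this pattern.
 */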
4987
b33a2815
VK
4988/**
4989 * intel_edp_drrs_enable - init drrs struct if supported
4990 * @intel_dp: DP struct
4991 *
4992 * Initializes frontbuffer_bits and drrs.dp
4993 */
c395578e
VK
4994void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4995{
4996 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4997 struct drm_i915_private *dev_priv = dev->dev_private;
4998 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4999 struct drm_crtc *crtc = dig_port->base.base.crtc;
5000 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5001
5002 if (!intel_crtc->config->has_drrs) {
5003 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5004 return;
5005 }
5006
5007 mutex_lock(&dev_priv->drrs.mutex);
5008 if (WARN_ON(dev_priv->drrs.dp)) {
5009 DRM_ERROR("DRRS already enabled\n");
5010 goto unlock;
5011 }
5012
5013 dev_priv->drrs.busy_frontbuffer_bits = 0;
5014
5015 dev_priv->drrs.dp = intel_dp;
5016
5017unlock:
5018 mutex_unlock(&dev_priv->drrs.mutex);
5019}
5020
b33a2815
VK
5021/**
5022 * intel_edp_drrs_disable - Disable DRRS
5023 * @intel_dp: DP struct
5024 *
5025 */
c395578e
VK
5026void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5027{
5028 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5029 struct drm_i915_private *dev_priv = dev->dev_private;
5030 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5031 struct drm_crtc *crtc = dig_port->base.base.crtc;
5032 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5033
5034 if (!intel_crtc->config->has_drrs)
5035 return;
5036
5037 mutex_lock(&dev_priv->drrs.mutex);
5038 if (!dev_priv->drrs.dp) {
5039 mutex_unlock(&dev_priv->drrs.mutex);
5040 return;
5041 }
5042
5043 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5044 intel_dp_set_drrs_state(dev_priv->dev,
5045 intel_dp->attached_connector->panel.
5046 fixed_mode->vrefresh);
5047
5048 dev_priv->drrs.dp = NULL;
5049 mutex_unlock(&dev_priv->drrs.mutex);
5050
5051 cancel_delayed_work_sync(&dev_priv->drrs.work);
5052}
5053
4e9ac947
VK
5054static void intel_edp_drrs_downclock_work(struct work_struct *work)
5055{
5056 struct drm_i915_private *dev_priv =
5057 container_of(work, typeof(*dev_priv), drrs.work.work);
5058 struct intel_dp *intel_dp;
5059
5060 mutex_lock(&dev_priv->drrs.mutex);
5061
5062 intel_dp = dev_priv->drrs.dp;
5063
5064 if (!intel_dp)
5065 goto unlock;
5066
439d7ac0 5067 /*
4e9ac947
VK
5068 * The delayed work can race with an invalidate hence we need to
5069 * recheck.
439d7ac0
PB
5070 */
5071
4e9ac947
VK
5072 if (dev_priv->drrs.busy_frontbuffer_bits)
5073 goto unlock;
439d7ac0 5074
4e9ac947
VK
5075 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5076 intel_dp_set_drrs_state(dev_priv->dev,
5077 intel_dp->attached_connector->panel.
5078 downclock_mode->vrefresh);
439d7ac0 5079
4e9ac947 5080unlock:
439d7ac0 5081
4e9ac947 5082 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5083}
5084
b33a2815
VK
5085/**
5086 * intel_edp_drrs_invalidate - Invalidate DRRS
5087 * @dev: DRM device
5088 * @frontbuffer_bits: frontbuffer plane tracking bits
5089 *
5090 * When there is a disturbance on screen (due to cursor movement/time
5091 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5092 * high RR.
5093 *
5094 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5095 */
a93fad0f
VK
5096void intel_edp_drrs_invalidate(struct drm_device *dev,
5097 unsigned frontbuffer_bits)
5098{
5099 struct drm_i915_private *dev_priv = dev->dev_private;
5100 struct drm_crtc *crtc;
5101 enum pipe pipe;
5102
5103 if (!dev_priv->drrs.dp)
5104 return;
5105
3954e733
R
5106 cancel_delayed_work_sync(&dev_priv->drrs.work);
5107
a93fad0f
VK
5108 mutex_lock(&dev_priv->drrs.mutex);
5109 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5110 pipe = to_intel_crtc(crtc)->pipe;
5111
5112 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5113 intel_dp_set_drrs_state(dev_priv->dev,
5114 dev_priv->drrs.dp->attached_connector->panel.
5115 fixed_mode->vrefresh);
5116 }
5117
5118 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5119
5120 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5121 mutex_unlock(&dev_priv->drrs.mutex);
5122}
5123
b33a2815
VK
5124/**
5125 * intel_edp_drrs_flush - Flush DRRS
5126 * @dev: DRM device
5127 * @frontbuffer_bits: frontbuffer plane tracking bits
5128 *
5129 * When there is no movement on screen, DRRS work can be scheduled.
5130 * This DRRS work is responsible for setting relevant registers after a
5131 * timeout of 1 second.
5132 *
5133 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5134 */
a93fad0f
VK
5135void intel_edp_drrs_flush(struct drm_device *dev,
5136 unsigned frontbuffer_bits)
5137{
5138 struct drm_i915_private *dev_priv = dev->dev_private;
5139 struct drm_crtc *crtc;
5140 enum pipe pipe;
5141
5142 if (!dev_priv->drrs.dp)
5143 return;
5144
3954e733
R
5145 cancel_delayed_work_sync(&dev_priv->drrs.work);
5146
a93fad0f
VK
5147 mutex_lock(&dev_priv->drrs.mutex);
5148 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5149 pipe = to_intel_crtc(crtc)->pipe;
5150 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5151
a93fad0f
VK
5152 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5153 !dev_priv->drrs.busy_frontbuffer_bits)
5154 schedule_delayed_work(&dev_priv->drrs.work,
5155 msecs_to_jiffies(1000));
5156 mutex_unlock(&dev_priv->drrs.mutex);
5157}
5158
b33a2815
VK
5159/**
5160 * DOC: Display Refresh Rate Switching (DRRS)
5161 *
5162 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5163 * which enables switching between low and high refresh rates,
5164 * dynamically, based on the usage scenario. This feature is applicable
5165 * for internal panels.
5166 *
5167 * Indication that the panel supports DRRS is given by the panel EDID, which
5168 * would list multiple refresh rates for one resolution.
5169 *
5170 * DRRS is of 2 types - static and seamless.
5171 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5172 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5173 * Seamless DRRS involves changing RR without any visual effect to the user
5174 * and can be used during normal system usage. This is done by programming
5175 * certain registers.
5176 *
5177 * Support for static/seamless DRRS may be indicated in the VBT based on
5178 * inputs from the panel spec.
5179 *
5180 * DRRS saves power by switching to low RR based on usage scenarios.
5181 *
5182 * eDP DRRS:-
5183 * The implementation is based on frontbuffer tracking implementation.
5184 * When there is a disturbance on the screen triggered by user activity or a
5185 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5186 * When there is no movement on screen, after a timeout of 1 second, a switch
5187 * to low RR is made.
5188 * For integration with frontbuffer tracking code,
5189 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5190 *
5191 * DRRS can be further extended to support other internal panels and also
5192 * the scenario of video playback wherein RR is set based on the rate
5193 * requested by userspace.
5194 */
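/*
 * Illustrative sketch (not part of the driver): how frontbuffer tracking is
 * expected to drive the eDP DRRS hooks described above.  The call sites are
 * hypothetical; the real callers live in the frontbuffer tracking code.
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *		- the screen is about to change: switch back to the high RR
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *		- drawing has finished: arm the 1 second downclock work, which
 *		  drops to the low RR if no new activity arrives
 */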
5195
5196/**
5197 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5198 * @intel_connector: eDP connector
5199 * @fixed_mode: preferred mode of panel
5200 *
5201 * This function is called only once at driver load to initialize basic
5202 * DRRS stuff.
5203 *
5204 * Returns:
5205 * Downclock mode if panel supports it, else return NULL.
5206 * DRRS support is determined by the presence of downclock mode (apart
5207 * from VBT setting).
5208 */
4f9db5b5 5209static struct drm_display_mode *
96178eeb
VK
5210intel_dp_drrs_init(struct intel_connector *intel_connector,
5211 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5212{
5213 struct drm_connector *connector = &intel_connector->base;
96178eeb 5214 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5215 struct drm_i915_private *dev_priv = dev->dev_private;
5216 struct drm_display_mode *downclock_mode = NULL;
5217
5218 if (INTEL_INFO(dev)->gen <= 6) {
5219 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5220 return NULL;
5221 }
5222
5223 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5224 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5225 return NULL;
5226 }
5227
5228 downclock_mode = intel_find_panel_downclock
5229 (dev, fixed_mode, connector);
5230
5231 if (!downclock_mode) {
a1d26342 5232 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5233 return NULL;
5234 }
5235
4e9ac947
VK
5236 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5237
96178eeb 5238 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5239
96178eeb 5240 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5241
96178eeb 5242 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5243 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5244 return downclock_mode;
5245}
5246
ed92f0b2 5247static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5248 struct intel_connector *intel_connector)
ed92f0b2
PZ
5249{
5250 struct drm_connector *connector = &intel_connector->base;
5251 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5252 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5253 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5254 struct drm_i915_private *dev_priv = dev->dev_private;
5255 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5256 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5257 bool has_dpcd;
5258 struct drm_display_mode *scan;
5259 struct edid *edid;
6517d273 5260 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5261
96178eeb 5262 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5263
ed92f0b2
PZ
5264 if (!is_edp(intel_dp))
5265 return true;
5266
49e6bc51
VS
5267 pps_lock(intel_dp);
5268 intel_edp_panel_vdd_sanitize(intel_dp);
5269 pps_unlock(intel_dp);
63635217 5270
ed92f0b2 5271 /* Cache DPCD and EDID for edp. */
ed92f0b2 5272 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5273
5274 if (has_dpcd) {
5275 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5276 dev_priv->no_aux_handshake =
5277 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5278 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5279 } else {
5280 /* if this fails, presume the device is a ghost */
5281 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5282 return false;
5283 }
5284
5285 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5286 pps_lock(intel_dp);
36b5f425 5287 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5288 pps_unlock(intel_dp);
ed92f0b2 5289
060c8778 5290 mutex_lock(&dev->mode_config.mutex);
0b99836f 5291 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5292 if (edid) {
5293 if (drm_add_edid_modes(connector, edid)) {
5294 drm_mode_connector_update_edid_property(connector,
5295 edid);
5296 drm_edid_to_eld(connector, edid);
5297 } else {
5298 kfree(edid);
5299 edid = ERR_PTR(-EINVAL);
5300 }
5301 } else {
5302 edid = ERR_PTR(-ENOENT);
5303 }
5304 intel_connector->edid = edid;
5305
5306 /* prefer fixed mode from EDID if available */
5307 list_for_each_entry(scan, &connector->probed_modes, head) {
5308 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5309 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5310 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5311 intel_connector, fixed_mode);
ed92f0b2
PZ
5312 break;
5313 }
5314 }
5315
5316 /* fallback to VBT if available for eDP */
5317 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5318 fixed_mode = drm_mode_duplicate(dev,
5319 dev_priv->vbt.lfp_lvds_vbt_mode);
5320 if (fixed_mode)
5321 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5322 }
060c8778 5323 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5324
01527b31
CT
5325 if (IS_VALLEYVIEW(dev)) {
5326 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5327 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5328
5329 /*
5330 * Figure out the current pipe for the initial backlight setup.
5331 * If the current pipe isn't valid, try the PPS pipe, and if that
5332 * fails just assume pipe A.
5333 */
5334 if (IS_CHERRYVIEW(dev))
5335 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5336 else
5337 pipe = PORT_TO_PIPE(intel_dp->DP);
5338
5339 if (pipe != PIPE_A && pipe != PIPE_B)
5340 pipe = intel_dp->pps_pipe;
5341
5342 if (pipe != PIPE_A && pipe != PIPE_B)
5343 pipe = PIPE_A;
5344
5345 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5346 pipe_name(pipe));
01527b31
CT
5347 }
5348
4f9db5b5 5349 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5350 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5351 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5352
5353 return true;
5354}
5355
16c25533 5356bool
f0fec3f2
PZ
5357intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5358 struct intel_connector *intel_connector)
a4fc5ed6 5359{
f0fec3f2
PZ
5360 struct drm_connector *connector = &intel_connector->base;
5361 struct intel_dp *intel_dp = &intel_dig_port->dp;
5362 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5363 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5364 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5365 enum port port = intel_dig_port->port;
0b99836f 5366 int type;
a4fc5ed6 5367
a4a5d2f8
VS
5368 intel_dp->pps_pipe = INVALID_PIPE;
5369
ec5b01dd 5370 /* intel_dp vfuncs */
b6b5e383
DL
5371 if (INTEL_INFO(dev)->gen >= 9)
5372 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5373 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5374 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5375 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5376 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5377 else if (HAS_PCH_SPLIT(dev))
5378 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5379 else
5380 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5381
b9ca5fad
DL
5382 if (INTEL_INFO(dev)->gen >= 9)
5383 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5384 else
5385 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5386
0767935e
DV
5387 /* Preserve the current hw state. */
5388 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5389 intel_dp->attached_connector = intel_connector;
3d3dc149 5390
3b32a35b 5391 if (intel_dp_is_edp(dev, port))
b329530c 5392 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5393 else
5394 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5395
f7d24902
ID
5396 /*
5397 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5398 * for DP the encoder type can be set by the caller to
5399 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5400 */
5401 if (type == DRM_MODE_CONNECTOR_eDP)
5402 intel_encoder->type = INTEL_OUTPUT_EDP;
5403
c17ed5b5
VS
5404 /* eDP only on port B and/or C on vlv/chv */
5405 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5406 port != PORT_B && port != PORT_C))
5407 return false;
5408
e7281eab
ID
5409 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5410 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5411 port_name(port));
5412
b329530c 5413 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5414 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5415
a4fc5ed6
KP
5416 connector->interlace_allowed = true;
5417 connector->doublescan_allowed = 0;
5418
f0fec3f2 5419 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5420 edp_panel_vdd_work);
a4fc5ed6 5421
df0e9248 5422 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5423 drm_connector_register(connector);
a4fc5ed6 5424
affa9354 5425 if (HAS_DDI(dev))
bcbc889b
PZ
5426 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5427 else
5428 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5429 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5430
0b99836f 5431 /* Set up the hotplug pin. */
ab9d7c30
PZ
5432 switch (port) {
5433 case PORT_A:
1d843f9d 5434 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5435 break;
5436 case PORT_B:
1d843f9d 5437 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5438 break;
5439 case PORT_C:
1d843f9d 5440 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5441 break;
5442 case PORT_D:
1d843f9d 5443 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5444 break;
5445 default:
ad1c0b19 5446 BUG();
5eb08b69
ZW
5447 }
5448
dada1a9f 5449 if (is_edp(intel_dp)) {
773538e8 5450 pps_lock(intel_dp);
1e74a324
VS
5451 intel_dp_init_panel_power_timestamps(intel_dp);
5452 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5453 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5454 else
36b5f425 5455 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5456 pps_unlock(intel_dp);
dada1a9f 5457 }
0095e6dc 5458
9d1a1031 5459 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5460
0e32b39c 5461 /* init MST on ports that can support it */
c86ea3d0 5462 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5463 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5464 intel_dp_mst_encoder_init(intel_dig_port,
5465 intel_connector->base.base.id);
0e32b39c
DA
5466 }
5467 }
5468
36b5f425 5469 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5470 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5471 if (is_edp(intel_dp)) {
5472 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5473 /*
 5474 * vdd might still be enabled due to the delayed vdd off.
5475 * Make sure vdd is actually turned off here.
5476 */
773538e8 5477 pps_lock(intel_dp);
4be73780 5478 edp_panel_vdd_off_sync(intel_dp);
773538e8 5479 pps_unlock(intel_dp);
15b1d171 5480 }
34ea3d38 5481 drm_connector_unregister(connector);
b2f246a8 5482 drm_connector_cleanup(connector);
16c25533 5483 return false;
b2f246a8 5484 }
32f9d658 5485
f684960e
CW
5486 intel_dp_add_properties(intel_dp, connector);
5487
a4fc5ed6
KP
5488 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5489 * 0xd. Failure to do so will result in spurious interrupts being
5490 * generated on the port when a cable is not attached.
5491 */
5492 if (IS_G4X(dev) && !IS_GM45(dev)) {
5493 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5494 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5495 }
16c25533
PZ
5496
5497 return true;
a4fc5ed6 5498}
f0fec3f2
PZ
5499
5500void
5501intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5502{
13cf5504 5503 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5504 struct intel_digital_port *intel_dig_port;
5505 struct intel_encoder *intel_encoder;
5506 struct drm_encoder *encoder;
5507 struct intel_connector *intel_connector;
5508
b14c5679 5509 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5510 if (!intel_dig_port)
5511 return;
5512
b14c5679 5513 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5514 if (!intel_connector) {
5515 kfree(intel_dig_port);
5516 return;
5517 }
5518
5519 intel_encoder = &intel_dig_port->base;
5520 encoder = &intel_encoder->base;
5521
5522 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5523 DRM_MODE_ENCODER_TMDS);
5524
5bfe2ac0 5525 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5526 intel_encoder->disable = intel_disable_dp;
00c09d70 5527 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5528 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5529 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5530 if (IS_CHERRYVIEW(dev)) {
9197c88b 5531 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5532 intel_encoder->pre_enable = chv_pre_enable_dp;
5533 intel_encoder->enable = vlv_enable_dp;
580d3811 5534 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5535 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5536 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5537 intel_encoder->pre_enable = vlv_pre_enable_dp;
5538 intel_encoder->enable = vlv_enable_dp;
49277c31 5539 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5540 } else {
ecff4f3b
JN
5541 intel_encoder->pre_enable = g4x_pre_enable_dp;
5542 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5543 if (INTEL_INFO(dev)->gen >= 5)
5544 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5545 }
f0fec3f2 5546
174edf1f 5547 intel_dig_port->port = port;
f0fec3f2
PZ
5548 intel_dig_port->dp.output_reg = output_reg;
5549
00c09d70 5550 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5551 if (IS_CHERRYVIEW(dev)) {
5552 if (port == PORT_D)
5553 intel_encoder->crtc_mask = 1 << 2;
5554 else
5555 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5556 } else {
5557 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5558 }
bc079e8b 5559 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5560 intel_encoder->hot_plug = intel_dp_hot_plug;
5561
13cf5504
DA
5562 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5563 dev_priv->hpd_irq_port[port] = intel_dig_port;
5564
15b1d171
PZ
5565 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5566 drm_encoder_cleanup(encoder);
5567 kfree(intel_dig_port);
b2f246a8 5568 kfree(intel_connector);
15b1d171 5569 }
f0fec3f2 5570}
0e32b39c
DA
5571
5572void intel_dp_mst_suspend(struct drm_device *dev)
5573{
5574 struct drm_i915_private *dev_priv = dev->dev_private;
5575 int i;
5576
5577 /* disable MST */
5578 for (i = 0; i < I915_MAX_PORTS; i++) {
5579 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5580 if (!intel_dig_port)
5581 continue;
5582
5583 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5584 if (!intel_dig_port->dp.can_mst)
5585 continue;
5586 if (intel_dig_port->dp.is_mst)
5587 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5588 }
5589 }
5590}
5591
5592void intel_dp_mst_resume(struct drm_device *dev)
5593{
5594 struct drm_i915_private *dev_priv = dev->dev_private;
5595 int i;
5596
5597 for (i = 0; i < I915_MAX_PORTS; i++) {
5598 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5599 if (!intel_dig_port)
5600 continue;
5601 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5602 int ret;
5603
5604 if (!intel_dig_port->dp.can_mst)
5605 continue;
5606
5607 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5608 if (ret != 0) {
5609 intel_dp_check_mst_status(&intel_dig_port->dp);
5610 }
5611 }
5612 }
5613}