]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/i915/intel_dp.c
drm/i915: Avoid overflowing the DP link rate arrays
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
a8f3ef61 87/* Skylake supports following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 91
cfcb0fc9
JB
92/**
93 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94 * @intel_dp: DP struct
95 *
96 * If a CPU or PCH DP output is attached to an eDP panel, this function
97 * will return true, and false otherwise.
98 */
99static bool is_edp(struct intel_dp *intel_dp)
100{
da63a9f2
PZ
101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
104}
105
68b4d824 106static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 107{
68b4d824
ID
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
111}
112
df0e9248
CW
113static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114{
fa90ecef 115 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
116}
117
ea5b213a 118static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 119static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 120static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 121static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
122static void vlv_steal_power_sequencer(struct drm_device *dev,
123 enum pipe pipe);
a4fc5ed6 124
ed4e9c1d
VS
125static int
126intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 127{
7183dc29 128 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
129
130 switch (max_link_bw) {
131 case DP_LINK_BW_1_62:
132 case DP_LINK_BW_2_7:
1db10e28 133 case DP_LINK_BW_5_4:
d4eead50 134 break;
a4fc5ed6 135 default:
d4eead50
ID
136 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137 max_link_bw);
a4fc5ed6
KP
138 max_link_bw = DP_LINK_BW_1_62;
139 break;
140 }
141 return max_link_bw;
142}
143
eeb6324d
PZ
144static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145{
146 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147 struct drm_device *dev = intel_dig_port->base.base.dev;
148 u8 source_max, sink_max;
149
150 source_max = 4;
151 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153 source_max = 2;
154
155 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157 return min(source_max, sink_max);
158}
159
cd9dde44
AJ
160/*
161 * The units on the numbers in the next two are... bizarre. Examples will
162 * make it clearer; this one parallels an example in the eDP spec.
163 *
164 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165 *
166 * 270000 * 1 * 8 / 10 == 216000
167 *
168 * The actual data capacity of that configuration is 2.16Gbit/s, so the
169 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
170 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171 * 119000. At 18bpp that's 2142000 kilobits per second.
172 *
173 * Thus the strange-looking division by 10 in intel_dp_link_required, to
174 * get the result in decakilobits instead of kilobits.
175 */
176
a4fc5ed6 177static int
c898261c 178intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 179{
cd9dde44 180 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
181}
182
fe27d53e
DA
183static int
184intel_dp_max_data_rate(int max_link_clock, int max_lanes)
185{
186 return (max_link_clock * max_lanes * 8) / 10;
187}
188
c19de8eb 189static enum drm_mode_status
a4fc5ed6
KP
190intel_dp_mode_valid(struct drm_connector *connector,
191 struct drm_display_mode *mode)
192{
df0e9248 193 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
194 struct intel_connector *intel_connector = to_intel_connector(connector);
195 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
196 int target_clock = mode->clock;
197 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 198
dd06f90e
JN
199 if (is_edp(intel_dp) && fixed_mode) {
200 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
201 return MODE_PANEL;
202
dd06f90e 203 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 204 return MODE_PANEL;
03afc4a2
DV
205
206 target_clock = fixed_mode->clock;
7de56f43
ZY
207 }
208
50fec21a 209 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 210 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
211
212 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
213 mode_rate = intel_dp_link_required(target_clock, 18);
214
215 if (mode_rate > max_rate)
c4867936 216 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
217
218 if (mode->clock < 10000)
219 return MODE_CLOCK_LOW;
220
0af78a2b
DV
221 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
222 return MODE_H_ILLEGAL;
223
a4fc5ed6
KP
224 return MODE_OK;
225}
226
a4f1289e 227uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
228{
229 int i;
230 uint32_t v = 0;
231
232 if (src_bytes > 4)
233 src_bytes = 4;
234 for (i = 0; i < src_bytes; i++)
235 v |= ((uint32_t) src[i]) << ((3-i) * 8);
236 return v;
237}
238
c2af70e2 239static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
240{
241 int i;
242 if (dst_bytes > 4)
243 dst_bytes = 4;
244 for (i = 0; i < dst_bytes; i++)
245 dst[i] = src >> ((3-i) * 8);
246}
247
fb0f8fbf
KP
248/* hrawclock is 1/4 the FSB frequency */
249static int
250intel_hrawclk(struct drm_device *dev)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 uint32_t clkcfg;
254
9473c8f4
VP
255 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
256 if (IS_VALLEYVIEW(dev))
257 return 200;
258
fb0f8fbf
KP
259 clkcfg = I915_READ(CLKCFG);
260 switch (clkcfg & CLKCFG_FSB_MASK) {
261 case CLKCFG_FSB_400:
262 return 100;
263 case CLKCFG_FSB_533:
264 return 133;
265 case CLKCFG_FSB_667:
266 return 166;
267 case CLKCFG_FSB_800:
268 return 200;
269 case CLKCFG_FSB_1067:
270 return 266;
271 case CLKCFG_FSB_1333:
272 return 333;
273 /* these two are just a guess; one of them might be right */
274 case CLKCFG_FSB_1600:
275 case CLKCFG_FSB_1600_ALT:
276 return 400;
277 default:
278 return 133;
279 }
280}
281
bf13e81b
JN
282static void
283intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 284 struct intel_dp *intel_dp);
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b 288
773538e8
VS
289static void pps_lock(struct intel_dp *intel_dp)
290{
291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
292 struct intel_encoder *encoder = &intel_dig_port->base;
293 struct drm_device *dev = encoder->base.dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 enum intel_display_power_domain power_domain;
296
297 /*
298 * See vlv_power_sequencer_reset() why we need
299 * a power domain reference here.
300 */
301 power_domain = intel_display_port_power_domain(encoder);
302 intel_display_power_get(dev_priv, power_domain);
303
304 mutex_lock(&dev_priv->pps_mutex);
305}
306
307static void pps_unlock(struct intel_dp *intel_dp)
308{
309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
310 struct intel_encoder *encoder = &intel_dig_port->base;
311 struct drm_device *dev = encoder->base.dev;
312 struct drm_i915_private *dev_priv = dev->dev_private;
313 enum intel_display_power_domain power_domain;
314
315 mutex_unlock(&dev_priv->pps_mutex);
316
317 power_domain = intel_display_port_power_domain(encoder);
318 intel_display_power_put(dev_priv, power_domain);
319}
320
961a0db0
VS
321static void
322vlv_power_sequencer_kick(struct intel_dp *intel_dp)
323{
324 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
325 struct drm_device *dev = intel_dig_port->base.base.dev;
326 struct drm_i915_private *dev_priv = dev->dev_private;
327 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 328 bool pll_enabled;
961a0db0
VS
329 uint32_t DP;
330
331 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
332 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
333 pipe_name(pipe), port_name(intel_dig_port->port)))
334 return;
335
336 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
337 pipe_name(pipe), port_name(intel_dig_port->port));
338
339 /* Preserve the BIOS-computed detected bit. This is
340 * supposed to be read-only.
341 */
342 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
343 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
344 DP |= DP_PORT_WIDTH(1);
345 DP |= DP_LINK_TRAIN_PAT_1;
346
347 if (IS_CHERRYVIEW(dev))
348 DP |= DP_PIPE_SELECT_CHV(pipe);
349 else if (pipe == PIPE_B)
350 DP |= DP_PIPEB_SELECT;
351
d288f65f
VS
352 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
353
354 /*
355 * The DPLL for the pipe must be enabled for this to work.
356 * So enable temporarily it if it's not already enabled.
357 */
358 if (!pll_enabled)
359 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
360 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
361
961a0db0
VS
362 /*
363 * Similar magic as in intel_dp_enable_port().
364 * We _must_ do this port enable + disable trick
365 * to make this power seqeuencer lock onto the port.
366 * Otherwise even VDD force bit won't work.
367 */
368 I915_WRITE(intel_dp->output_reg, DP);
369 POSTING_READ(intel_dp->output_reg);
370
371 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
376
377 if (!pll_enabled)
378 vlv_force_pll_off(dev, pipe);
961a0db0
VS
379}
380
bf13e81b
JN
381static enum pipe
382vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
383{
384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
385 struct drm_device *dev = intel_dig_port->base.base.dev;
386 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
387 struct intel_encoder *encoder;
388 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 389 enum pipe pipe;
bf13e81b 390
e39b999a 391 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 392
a8c3344e
VS
393 /* We should never land here with regular DP ports */
394 WARN_ON(!is_edp(intel_dp));
395
a4a5d2f8
VS
396 if (intel_dp->pps_pipe != INVALID_PIPE)
397 return intel_dp->pps_pipe;
398
399 /*
400 * We don't have power sequencer currently.
401 * Pick one that's not used by other ports.
402 */
403 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
404 base.head) {
405 struct intel_dp *tmp;
406
407 if (encoder->type != INTEL_OUTPUT_EDP)
408 continue;
409
410 tmp = enc_to_intel_dp(&encoder->base);
411
412 if (tmp->pps_pipe != INVALID_PIPE)
413 pipes &= ~(1 << tmp->pps_pipe);
414 }
415
416 /*
417 * Didn't find one. This should not happen since there
418 * are two power sequencers and up to two eDP ports.
419 */
420 if (WARN_ON(pipes == 0))
a8c3344e
VS
421 pipe = PIPE_A;
422 else
423 pipe = ffs(pipes) - 1;
a4a5d2f8 424
a8c3344e
VS
425 vlv_steal_power_sequencer(dev, pipe);
426 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
427
428 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
429 pipe_name(intel_dp->pps_pipe),
430 port_name(intel_dig_port->port));
431
432 /* init power sequencer on this pipe and port */
36b5f425
VS
433 intel_dp_init_panel_power_sequencer(dev, intel_dp);
434 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 435
961a0db0
VS
436 /*
437 * Even vdd force doesn't work until we've made
438 * the power sequencer lock in on the port.
439 */
440 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
441
442 return intel_dp->pps_pipe;
443}
444
6491ab27
VS
445typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
446 enum pipe pipe);
447
448static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
452}
453
454static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
455 enum pipe pipe)
456{
457 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
458}
459
460static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
461 enum pipe pipe)
462{
463 return true;
464}
bf13e81b 465
a4a5d2f8 466static enum pipe
6491ab27
VS
467vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468 enum port port,
469 vlv_pipe_check pipe_check)
a4a5d2f8
VS
470{
471 enum pipe pipe;
bf13e81b 472
bf13e81b
JN
473 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
476
477 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478 continue;
479
6491ab27
VS
480 if (!pipe_check(dev_priv, pipe))
481 continue;
482
a4a5d2f8 483 return pipe;
bf13e81b
JN
484 }
485
a4a5d2f8
VS
486 return INVALID_PIPE;
487}
488
489static void
490vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
491{
492 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
493 struct drm_device *dev = intel_dig_port->base.base.dev;
494 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
495 enum port port = intel_dig_port->port;
496
497 lockdep_assert_held(&dev_priv->pps_mutex);
498
499 /* try to find a pipe with this port selected */
6491ab27
VS
500 /* first pick one where the panel is on */
501 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
502 vlv_pipe_has_pp_on);
503 /* didn't find one? pick one where vdd is on */
504 if (intel_dp->pps_pipe == INVALID_PIPE)
505 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 vlv_pipe_has_vdd_on);
507 /* didn't find one? pick one with just the correct port */
508 if (intel_dp->pps_pipe == INVALID_PIPE)
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 vlv_pipe_any);
a4a5d2f8
VS
511
512 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
513 if (intel_dp->pps_pipe == INVALID_PIPE) {
514 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
515 port_name(port));
516 return;
bf13e81b
JN
517 }
518
a4a5d2f8
VS
519 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
520 port_name(port), pipe_name(intel_dp->pps_pipe));
521
36b5f425
VS
522 intel_dp_init_panel_power_sequencer(dev, intel_dp);
523 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
524}
525
773538e8
VS
526void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
527{
528 struct drm_device *dev = dev_priv->dev;
529 struct intel_encoder *encoder;
530
531 if (WARN_ON(!IS_VALLEYVIEW(dev)))
532 return;
533
534 /*
535 * We can't grab pps_mutex here due to deadlock with power_domain
536 * mutex when power_domain functions are called while holding pps_mutex.
537 * That also means that in order to use pps_pipe the code needs to
538 * hold both a power domain reference and pps_mutex, and the power domain
539 * reference get/put must be done while _not_ holding pps_mutex.
540 * pps_{lock,unlock}() do these steps in the correct order, so one
541 * should use them always.
542 */
543
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
545 struct intel_dp *intel_dp;
546
547 if (encoder->type != INTEL_OUTPUT_EDP)
548 continue;
549
550 intel_dp = enc_to_intel_dp(&encoder->base);
551 intel_dp->pps_pipe = INVALID_PIPE;
552 }
bf13e81b
JN
553}
554
555static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556{
557 struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559 if (HAS_PCH_SPLIT(dev))
560 return PCH_PP_CONTROL;
561 else
562 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563}
564
565static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566{
567 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569 if (HAS_PCH_SPLIT(dev))
570 return PCH_PP_STATUS;
571 else
572 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573}
574
01527b31
CT
575/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
576 This function only applicable when panel PM state is not to be tracked */
577static int edp_notify_handler(struct notifier_block *this, unsigned long code,
578 void *unused)
579{
580 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
581 edp_notifier);
582 struct drm_device *dev = intel_dp_to_dev(intel_dp);
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 u32 pp_div;
585 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
586
587 if (!is_edp(intel_dp) || code != SYS_RESTART)
588 return 0;
589
773538e8 590 pps_lock(intel_dp);
e39b999a 591
01527b31 592 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
593 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
594
01527b31
CT
595 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
596 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
597 pp_div = I915_READ(pp_div_reg);
598 pp_div &= PP_REFERENCE_DIVIDER_MASK;
599
600 /* 0x1F write to PP_DIV_REG sets max cycle delay */
601 I915_WRITE(pp_div_reg, pp_div | 0x1F);
602 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
603 msleep(intel_dp->panel_power_cycle_delay);
604 }
605
773538e8 606 pps_unlock(intel_dp);
e39b999a 607
01527b31
CT
608 return 0;
609}
610
4be73780 611static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 612{
30add22d 613 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
614 struct drm_i915_private *dev_priv = dev->dev_private;
615
e39b999a
VS
616 lockdep_assert_held(&dev_priv->pps_mutex);
617
9a42356b
VS
618 if (IS_VALLEYVIEW(dev) &&
619 intel_dp->pps_pipe == INVALID_PIPE)
620 return false;
621
bf13e81b 622 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
623}
624
4be73780 625static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 626{
30add22d 627 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
628 struct drm_i915_private *dev_priv = dev->dev_private;
629
e39b999a
VS
630 lockdep_assert_held(&dev_priv->pps_mutex);
631
9a42356b
VS
632 if (IS_VALLEYVIEW(dev) &&
633 intel_dp->pps_pipe == INVALID_PIPE)
634 return false;
635
773538e8 636 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
637}
638
9b984dae
KP
639static void
640intel_dp_check_edp(struct intel_dp *intel_dp)
641{
30add22d 642 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 643 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 644
9b984dae
KP
645 if (!is_edp(intel_dp))
646 return;
453c5420 647
4be73780 648 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
649 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
651 I915_READ(_pp_stat_reg(intel_dp)),
652 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
653 }
654}
655
9ee32fea
DV
656static uint32_t
657intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
658{
659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
660 struct drm_device *dev = intel_dig_port->base.base.dev;
661 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 662 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
663 uint32_t status;
664 bool done;
665
ef04f00d 666#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 667 if (has_aux_irq)
b18ac466 668 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 669 msecs_to_jiffies_timeout(10));
9ee32fea
DV
670 else
671 done = wait_for_atomic(C, 10) == 0;
672 if (!done)
673 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
674 has_aux_irq);
675#undef C
676
677 return status;
678}
679
ec5b01dd 680static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 681{
174edf1f
PZ
682 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 684
ec5b01dd
DL
685 /*
686 * The clock divider is based off the hrawclk, and would like to run at
687 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 688 */
ec5b01dd
DL
689 return index ? 0 : intel_hrawclk(dev) / 2;
690}
691
692static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693{
694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695 struct drm_device *dev = intel_dig_port->base.base.dev;
696
697 if (index)
698 return 0;
699
700 if (intel_dig_port->port == PORT_A) {
701 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 702 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 703 else
b84a1cf8 704 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
705 } else {
706 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707 }
708}
709
710static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711{
712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713 struct drm_device *dev = intel_dig_port->base.base.dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715
716 if (intel_dig_port->port == PORT_A) {
717 if (index)
718 return 0;
719 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
720 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
721 /* Workaround for non-ULT HSW */
bc86625a
CW
722 switch (index) {
723 case 0: return 63;
724 case 1: return 72;
725 default: return 0;
726 }
ec5b01dd 727 } else {
bc86625a 728 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 729 }
b84a1cf8
RV
730}
731
ec5b01dd
DL
732static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
733{
734 return index ? 0 : 100;
735}
736
b6b5e383
DL
737static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
738{
739 /*
740 * SKL doesn't need us to program the AUX clock divider (Hardware will
741 * derive the clock from CDCLK automatically). We still implement the
742 * get_aux_clock_divider vfunc to plug-in into the existing code.
743 */
744 return index ? 0 : 1;
745}
746
5ed12a19
DL
747static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
748 bool has_aux_irq,
749 int send_bytes,
750 uint32_t aux_clock_divider)
751{
752 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
753 struct drm_device *dev = intel_dig_port->base.base.dev;
754 uint32_t precharge, timeout;
755
756 if (IS_GEN6(dev))
757 precharge = 3;
758 else
759 precharge = 5;
760
761 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
762 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
763 else
764 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
765
766 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 767 DP_AUX_CH_CTL_DONE |
5ed12a19 768 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 769 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 770 timeout |
788d4433 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 774 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
775}
776
b9ca5fad
DL
777static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778 bool has_aux_irq,
779 int send_bytes,
780 uint32_t unused)
781{
782 return DP_AUX_CH_CTL_SEND_BUSY |
783 DP_AUX_CH_CTL_DONE |
784 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
785 DP_AUX_CH_CTL_TIME_OUT_ERROR |
786 DP_AUX_CH_CTL_TIME_OUT_1600us |
787 DP_AUX_CH_CTL_RECEIVE_ERROR |
788 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
789 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
790}
791
b84a1cf8
RV
792static int
793intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 794 const uint8_t *send, int send_bytes,
b84a1cf8
RV
795 uint8_t *recv, int recv_size)
796{
797 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
798 struct drm_device *dev = intel_dig_port->base.base.dev;
799 struct drm_i915_private *dev_priv = dev->dev_private;
800 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
801 uint32_t ch_data = ch_ctl + 4;
bc86625a 802 uint32_t aux_clock_divider;
b84a1cf8
RV
803 int i, ret, recv_bytes;
804 uint32_t status;
5ed12a19 805 int try, clock = 0;
4e6b788c 806 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
807 bool vdd;
808
773538e8 809 pps_lock(intel_dp);
e39b999a 810
72c3500a
VS
811 /*
812 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 * In such cases we want to leave VDD enabled and it's up to upper layers
814 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
815 * ourselves.
816 */
1e0560e0 817 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
818
819 /* dp aux is extremely sensitive to irq latency, hence request the
820 * lowest possible wakeup latency and so prevent the cpu from going into
821 * deep sleep states.
822 */
823 pm_qos_update_request(&dev_priv->pm_qos, 0);
824
825 intel_dp_check_edp(intel_dp);
5eb08b69 826
c67a470b
PZ
827 intel_aux_display_runtime_get(dev_priv);
828
11bee43e
JB
829 /* Try to wait for any previous AUX channel activity */
830 for (try = 0; try < 3; try++) {
ef04f00d 831 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
832 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
833 break;
834 msleep(1);
835 }
836
837 if (try == 3) {
838 WARN(1, "dp_aux_ch not started status 0x%08x\n",
839 I915_READ(ch_ctl));
9ee32fea
DV
840 ret = -EBUSY;
841 goto out;
4f7f7b7e
CW
842 }
843
46a5ae9f
PZ
844 /* Only 5 data registers! */
845 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
846 ret = -E2BIG;
847 goto out;
848 }
849
ec5b01dd 850 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
851 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
852 has_aux_irq,
853 send_bytes,
854 aux_clock_divider);
5ed12a19 855
bc86625a
CW
856 /* Must try at least 3 times according to DP spec */
857 for (try = 0; try < 5; try++) {
858 /* Load the send data into the aux channel data registers */
859 for (i = 0; i < send_bytes; i += 4)
860 I915_WRITE(ch_data + i,
a4f1289e
RV
861 intel_dp_pack_aux(send + i,
862 send_bytes - i));
bc86625a
CW
863
864 /* Send the command and wait for it to complete */
5ed12a19 865 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
866
867 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
868
869 /* Clear done status and any errors */
870 I915_WRITE(ch_ctl,
871 status |
872 DP_AUX_CH_CTL_DONE |
873 DP_AUX_CH_CTL_TIME_OUT_ERROR |
874 DP_AUX_CH_CTL_RECEIVE_ERROR);
875
876 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR))
878 continue;
879 if (status & DP_AUX_CH_CTL_DONE)
880 break;
881 }
4f7f7b7e 882 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
883 break;
884 }
885
a4fc5ed6 886 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 887 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
888 ret = -EBUSY;
889 goto out;
a4fc5ed6
KP
890 }
891
892 /* Check for timeout or receive error.
893 * Timeouts occur when the sink is not connected
894 */
a5b3da54 895 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 896 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
897 ret = -EIO;
898 goto out;
a5b3da54 899 }
1ae8c0a5
KP
900
901 /* Timeouts occur when the device isn't connected, so they're
902 * "normal" -- don't fill the kernel log with these */
a5b3da54 903 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 904 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
905 ret = -ETIMEDOUT;
906 goto out;
a4fc5ed6
KP
907 }
908
909 /* Unload any bytes sent back from the other side */
910 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
911 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
912 if (recv_bytes > recv_size)
913 recv_bytes = recv_size;
0206e353 914
4f7f7b7e 915 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
916 intel_dp_unpack_aux(I915_READ(ch_data + i),
917 recv + i, recv_bytes - i);
a4fc5ed6 918
9ee32fea
DV
919 ret = recv_bytes;
920out:
921 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 922 intel_aux_display_runtime_put(dev_priv);
9ee32fea 923
884f19e9
JN
924 if (vdd)
925 edp_panel_vdd_off(intel_dp, false);
926
773538e8 927 pps_unlock(intel_dp);
e39b999a 928
9ee32fea 929 return ret;
a4fc5ed6
KP
930}
931
a6c8aff0
JN
932#define BARE_ADDRESS_SIZE 3
933#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
934static ssize_t
935intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 936{
9d1a1031
JN
937 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
938 uint8_t txbuf[20], rxbuf[20];
939 size_t txsize, rxsize;
a4fc5ed6 940 int ret;
a4fc5ed6 941
9d1a1031
JN
942 txbuf[0] = msg->request << 4;
943 txbuf[1] = msg->address >> 8;
944 txbuf[2] = msg->address & 0xff;
945 txbuf[3] = msg->size - 1;
46a5ae9f 946
9d1a1031
JN
947 switch (msg->request & ~DP_AUX_I2C_MOT) {
948 case DP_AUX_NATIVE_WRITE:
949 case DP_AUX_I2C_WRITE:
a6c8aff0 950 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 951 rxsize = 1;
f51a44b9 952
9d1a1031
JN
953 if (WARN_ON(txsize > 20))
954 return -E2BIG;
a4fc5ed6 955
9d1a1031 956 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 957
9d1a1031
JN
958 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
959 if (ret > 0) {
960 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 961
9d1a1031
JN
962 /* Return payload size. */
963 ret = msg->size;
964 }
965 break;
46a5ae9f 966
9d1a1031
JN
967 case DP_AUX_NATIVE_READ:
968 case DP_AUX_I2C_READ:
a6c8aff0 969 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 970 rxsize = msg->size + 1;
a4fc5ed6 971
9d1a1031
JN
972 if (WARN_ON(rxsize > 20))
973 return -E2BIG;
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
978 /*
979 * Assume happy day, and copy the data. The caller is
980 * expected to check msg->reply before touching it.
981 *
982 * Return payload size.
983 */
984 ret--;
985 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 986 }
9d1a1031
JN
987 break;
988
989 default:
990 ret = -EINVAL;
991 break;
a4fc5ed6 992 }
f51a44b9 993
9d1a1031 994 return ret;
a4fc5ed6
KP
995}
996
9d1a1031
JN
997static void
998intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
999{
1000 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1001 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1002 enum port port = intel_dig_port->port;
0b99836f 1003 const char *name = NULL;
ab2c0672
DA
1004 int ret;
1005
33ad6626
JN
1006 switch (port) {
1007 case PORT_A:
1008 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1009 name = "DPDDC-A";
ab2c0672 1010 break;
33ad6626
JN
1011 case PORT_B:
1012 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1013 name = "DPDDC-B";
ab2c0672 1014 break;
33ad6626
JN
1015 case PORT_C:
1016 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1017 name = "DPDDC-C";
ab2c0672 1018 break;
33ad6626
JN
1019 case PORT_D:
1020 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1021 name = "DPDDC-D";
33ad6626
JN
1022 break;
1023 default:
1024 BUG();
ab2c0672
DA
1025 }
1026
1b1aad75
DL
1027 /*
1028 * The AUX_CTL register is usually DP_CTL + 0x10.
1029 *
1030 * On Haswell and Broadwell though:
1031 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1032 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1033 *
1034 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1035 */
1036 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1037 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1038
0b99836f 1039 intel_dp->aux.name = name;
9d1a1031
JN
1040 intel_dp->aux.dev = dev->dev;
1041 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1042
0b99836f
JN
1043 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1044 connector->base.kdev->kobj.name);
8316f337 1045
4f71d0cb 1046 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1047 if (ret < 0) {
4f71d0cb 1048 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1049 name, ret);
1050 return;
ab2c0672 1051 }
8a5e6aeb 1052
0b99836f
JN
1053 ret = sysfs_create_link(&connector->base.kdev->kobj,
1054 &intel_dp->aux.ddc.dev.kobj,
1055 intel_dp->aux.ddc.dev.kobj.name);
1056 if (ret < 0) {
1057 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1058 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1059 }
a4fc5ed6
KP
1060}
1061
80f65de3
ID
1062static void
1063intel_dp_connector_unregister(struct intel_connector *intel_connector)
1064{
1065 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1066
0e32b39c
DA
1067 if (!intel_connector->mst_port)
1068 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1069 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1070 intel_connector_unregister(intel_connector);
1071}
1072
5416d871 1073static void
c3346ef6 1074skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1075{
1076 u32 ctrl1;
1077
1078 pipe_config->ddi_pll_sel = SKL_DPLL0;
1079 pipe_config->dpll_hw_state.cfgcr1 = 0;
1080 pipe_config->dpll_hw_state.cfgcr2 = 0;
1081
1082 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1083 switch (link_clock / 2) {
1084 case 81000:
5416d871
DL
1085 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1086 SKL_DPLL0);
1087 break;
c3346ef6 1088 case 135000:
5416d871
DL
1089 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1090 SKL_DPLL0);
1091 break;
c3346ef6 1092 case 270000:
5416d871
DL
1093 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1094 SKL_DPLL0);
1095 break;
c3346ef6
SJ
1096 case 162000:
1097 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1098 SKL_DPLL0);
1099 break;
1100 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1101 results in CDCLK change. Need to handle the change of CDCLK by
1102 disabling pipes and re-enabling them */
1103 case 108000:
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1105 SKL_DPLL0);
1106 break;
1107 case 216000:
1108 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1109 SKL_DPLL0);
1110 break;
1111
5416d871
DL
1112 }
1113 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1114}
1115
0e50338c 1116static void
5cec258b 1117hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1118{
1119 switch (link_bw) {
1120 case DP_LINK_BW_1_62:
1121 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122 break;
1123 case DP_LINK_BW_2_7:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125 break;
1126 case DP_LINK_BW_5_4:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128 break;
1129 }
1130}
1131
fc0f8e25 1132static int
12f6a2e2 1133intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1134{
12f6a2e2
VS
1135 if (intel_dp->num_supported_rates) {
1136 *sink_rates = intel_dp->supported_rates;
ea2d8a42 1137 return intel_dp->num_supported_rates;
fc0f8e25 1138 }
12f6a2e2
VS
1139
1140 *sink_rates = default_rates;
1141
1142 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1143}
1144
a8f3ef61 1145static int
1db10e28 1146intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1147{
636280ba
VS
1148 if (INTEL_INFO(dev)->gen >= 9) {
1149 *source_rates = gen9_rates;
1150 return ARRAY_SIZE(gen9_rates);
a8f3ef61 1151 }
636280ba
VS
1152
1153 *source_rates = default_rates;
1154
1db10e28
VS
1155 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156 /* WaDisableHBR2:skl */
1157 return (DP_LINK_BW_2_7 >> 3) + 1;
1158 else if (INTEL_INFO(dev)->gen >= 8 ||
1159 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160 return (DP_LINK_BW_5_4 >> 3) + 1;
1161 else
1162 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1163}
1164
c6bb3538
DV
1165static void
1166intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1167 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1168{
1169 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1170 const struct dp_link_dpll *divisor = NULL;
1171 int i, count = 0;
c6bb3538
DV
1172
1173 if (IS_G4X(dev)) {
9dd4ffdf
CML
1174 divisor = gen4_dpll;
1175 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1176 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1177 divisor = pch_dpll;
1178 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1179 } else if (IS_CHERRYVIEW(dev)) {
1180 divisor = chv_dpll;
1181 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1182 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1183 divisor = vlv_dpll;
1184 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1185 }
9dd4ffdf
CML
1186
1187 if (divisor && count) {
1188 for (i = 0; i < count; i++) {
1189 if (link_bw == divisor[i].link_bw) {
1190 pipe_config->dpll = divisor[i].dpll;
1191 pipe_config->clock_set = true;
1192 break;
1193 }
1194 }
c6bb3538
DV
1195 }
1196}
1197
2ecae76a
VS
1198static int intersect_rates(const int *source_rates, int source_len,
1199 const int *sink_rates, int sink_len,
1200 int *supported_rates)
a8f3ef61
SJ
1201{
1202 int i = 0, j = 0, k = 0;
1203
a8f3ef61
SJ
1204 while (i < source_len && j < sink_len) {
1205 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1206 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1207 return k;
a8f3ef61
SJ
1208 supported_rates[k] = source_rates[i];
1209 ++k;
1210 ++i;
1211 ++j;
1212 } else if (source_rates[i] < sink_rates[j]) {
1213 ++i;
1214 } else {
1215 ++j;
1216 }
1217 }
1218 return k;
1219}
1220
2ecae76a
VS
1221static int intel_supported_rates(struct intel_dp *intel_dp,
1222 int *supported_rates)
1223{
1224 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1225 const int *source_rates, *sink_rates;
1226 int source_len, sink_len;
1227
1228 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1229 source_len = intel_dp_source_rates(dev, &source_rates);
1230
1231 return intersect_rates(source_rates, source_len,
1232 sink_rates, sink_len,
1233 supported_rates);
1234}
1235
f4896f15 1236static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1237{
1238 int i = 0;
1239
1240 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1241 if (find == rates[i])
1242 break;
1243
1244 return i;
1245}
1246
50fec21a
VS
1247int
1248intel_dp_max_link_rate(struct intel_dp *intel_dp)
1249{
1250 int rates[DP_MAX_SUPPORTED_RATES] = {};
1251 int len;
1252
1253 len = intel_supported_rates(intel_dp, rates);
1254 if (WARN_ON(len <= 0))
1255 return 162000;
1256
1257 return rates[rate_to_index(0, rates) - 1];
1258}
1259
ed4e9c1d
VS
1260int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1261{
1262 return rate_to_index(rate, intel_dp->supported_rates);
1263}
1264
00c09d70 1265bool
5bfe2ac0 1266intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1267 struct intel_crtc_state *pipe_config)
a4fc5ed6 1268{
5bfe2ac0 1269 struct drm_device *dev = encoder->base.dev;
36008365 1270 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1271 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1272 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1273 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 1274 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 1275 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1276 int lane_count, clock;
56071a20 1277 int min_lane_count = 1;
eeb6324d 1278 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1279 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1280 int min_clock = 0;
a8f3ef61 1281 int max_clock;
083f9560 1282 int bpp, mode_rate;
ff9a6750 1283 int link_avail, link_clock;
2ecae76a
VS
1284 int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
1285 int supported_len;
a8f3ef61 1286
2ecae76a 1287 supported_len = intel_supported_rates(intel_dp, supported_rates);
a8f3ef61
SJ
1288
1289 /* No common link rates between source and sink */
1290 WARN_ON(supported_len <= 0);
1291
1292 max_clock = supported_len - 1;
a4fc5ed6 1293
bc7d38a4 1294 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1295 pipe_config->has_pch_encoder = true;
1296
03afc4a2 1297 pipe_config->has_dp_encoder = true;
f769cd24 1298 pipe_config->has_drrs = false;
9ed109a7 1299 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1300
dd06f90e
JN
1301 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1302 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1303 adjusted_mode);
2dd24552
JB
1304 if (!HAS_PCH_SPLIT(dev))
1305 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1306 intel_connector->panel.fitting_mode);
1307 else
b074cec8
JB
1308 intel_pch_panel_fitting(intel_crtc, pipe_config,
1309 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1310 }
1311
cb1793ce 1312 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1313 return false;
1314
083f9560 1315 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61
SJ
1316 "max bw %d pixel clock %iKHz\n",
1317 max_lane_count, supported_rates[max_clock],
241bfc38 1318 adjusted_mode->crtc_clock);
083f9560 1319
36008365
DV
1320 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1321 * bpc in between. */
3e7ca985 1322 bpp = pipe_config->pipe_bpp;
56071a20
JN
1323 if (is_edp(intel_dp)) {
1324 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1325 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1326 dev_priv->vbt.edp_bpp);
1327 bpp = dev_priv->vbt.edp_bpp;
1328 }
1329
344c5bbc
JN
1330 /*
1331 * Use the maximum clock and number of lanes the eDP panel
1332 * advertizes being capable of. The panels are generally
1333 * designed to support only a single clock and lane
1334 * configuration, and typically these values correspond to the
1335 * native resolution of the panel.
1336 */
1337 min_lane_count = max_lane_count;
1338 min_clock = max_clock;
7984211e 1339 }
657445fe 1340
36008365 1341 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1342 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1343 bpp);
36008365 1344
c6930992 1345 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1346 for (lane_count = min_lane_count;
1347 lane_count <= max_lane_count;
1348 lane_count <<= 1) {
1349
1350 link_clock = supported_rates[clock];
36008365
DV
1351 link_avail = intel_dp_max_data_rate(link_clock,
1352 lane_count);
1353
1354 if (mode_rate <= link_avail) {
1355 goto found;
1356 }
1357 }
1358 }
1359 }
c4867936 1360
36008365 1361 return false;
3685a8f3 1362
36008365 1363found:
55bc60db
VS
1364 if (intel_dp->color_range_auto) {
1365 /*
1366 * See:
1367 * CEA-861-E - 5.1 Default Encoding Parameters
1368 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1369 */
18316c8c 1370 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1371 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1372 else
1373 intel_dp->color_range = 0;
1374 }
1375
3685a8f3 1376 if (intel_dp->color_range)
50f3b016 1377 pipe_config->limited_color_range = true;
a4fc5ed6 1378
36008365 1379 intel_dp->lane_count = lane_count;
a8f3ef61 1380
bc27b7d3
VS
1381 if (intel_dp->num_supported_rates) {
1382 intel_dp->link_bw = 0;
a8f3ef61 1383 intel_dp->rate_select =
ed4e9c1d 1384 intel_dp_rate_select(intel_dp, supported_rates[clock]);
bc27b7d3
VS
1385 } else {
1386 intel_dp->link_bw =
1387 drm_dp_link_rate_to_bw_code(supported_rates[clock]);
1388 intel_dp->rate_select = 0;
a8f3ef61
SJ
1389 }
1390
657445fe 1391 pipe_config->pipe_bpp = bpp;
a8f3ef61 1392 pipe_config->port_clock = supported_rates[clock];
a4fc5ed6 1393
36008365
DV
1394 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1395 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1396 pipe_config->port_clock, bpp);
36008365
DV
1397 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1398 mode_rate, link_avail);
a4fc5ed6 1399
03afc4a2 1400 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1401 adjusted_mode->crtc_clock,
1402 pipe_config->port_clock,
03afc4a2 1403 &pipe_config->dp_m_n);
9d1a455b 1404
439d7ac0 1405 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1406 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1407 pipe_config->has_drrs = true;
439d7ac0
PB
1408 intel_link_compute_m_n(bpp, lane_count,
1409 intel_connector->panel.downclock_mode->clock,
1410 pipe_config->port_clock,
1411 &pipe_config->dp_m2_n2);
1412 }
1413
5416d871 1414 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
c3346ef6 1415 skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
5416d871 1416 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1417 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1418 else
1419 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1420
03afc4a2 1421 return true;
a4fc5ed6
KP
1422}
1423
7c62a164 1424static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1425{
7c62a164
DV
1426 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1427 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1428 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1429 struct drm_i915_private *dev_priv = dev->dev_private;
1430 u32 dpa_ctl;
1431
6e3c9717
ACO
1432 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1433 crtc->config->port_clock);
ea9b6006
DV
1434 dpa_ctl = I915_READ(DP_A);
1435 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1436
6e3c9717 1437 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1438 /* For a long time we've carried around a ILK-DevA w/a for the
1439 * 160MHz clock. If we're really unlucky, it's still required.
1440 */
1441 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1442 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1443 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1444 } else {
1445 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1446 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1447 }
1ce17038 1448
ea9b6006
DV
1449 I915_WRITE(DP_A, dpa_ctl);
1450
1451 POSTING_READ(DP_A);
1452 udelay(500);
1453}
1454
8ac33ed3 1455static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1456{
b934223d 1457 struct drm_device *dev = encoder->base.dev;
417e822d 1458 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1459 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1460 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1461 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1462 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1463
417e822d 1464 /*
1a2eb460 1465 * There are four kinds of DP registers:
417e822d
KP
1466 *
1467 * IBX PCH
1a2eb460
KP
1468 * SNB CPU
1469 * IVB CPU
417e822d
KP
1470 * CPT PCH
1471 *
1472 * IBX PCH and CPU are the same for almost everything,
1473 * except that the CPU DP PLL is configured in this
1474 * register
1475 *
1476 * CPT PCH is quite different, having many bits moved
1477 * to the TRANS_DP_CTL register instead. That
1478 * configuration happens (oddly) in ironlake_pch_enable
1479 */
9c9e7927 1480
417e822d
KP
1481 /* Preserve the BIOS-computed detected bit. This is
1482 * supposed to be read-only.
1483 */
1484 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1485
417e822d 1486 /* Handle DP bits in common between all three register formats */
417e822d 1487 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1488 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1489
6e3c9717 1490 if (crtc->config->has_audio)
ea5b213a 1491 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1492
417e822d 1493 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1494
bc7d38a4 1495 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1496 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1497 intel_dp->DP |= DP_SYNC_HS_HIGH;
1498 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1499 intel_dp->DP |= DP_SYNC_VS_HIGH;
1500 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1501
6aba5b6c 1502 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1503 intel_dp->DP |= DP_ENHANCED_FRAMING;
1504
7c62a164 1505 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1506 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1507 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1508 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1509
1510 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1511 intel_dp->DP |= DP_SYNC_HS_HIGH;
1512 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1513 intel_dp->DP |= DP_SYNC_VS_HIGH;
1514 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1515
6aba5b6c 1516 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1517 intel_dp->DP |= DP_ENHANCED_FRAMING;
1518
44f37d1f
CML
1519 if (!IS_CHERRYVIEW(dev)) {
1520 if (crtc->pipe == 1)
1521 intel_dp->DP |= DP_PIPEB_SELECT;
1522 } else {
1523 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1524 }
417e822d
KP
1525 } else {
1526 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1527 }
a4fc5ed6
KP
1528}
1529
ffd6749d
PZ
1530#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1531#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1532
1a5ef5b7
PZ
1533#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1534#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1535
ffd6749d
PZ
1536#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1537#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1538
4be73780 1539static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1540 u32 mask,
1541 u32 value)
bd943159 1542{
30add22d 1543 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1544 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1545 u32 pp_stat_reg, pp_ctrl_reg;
1546
e39b999a
VS
1547 lockdep_assert_held(&dev_priv->pps_mutex);
1548
bf13e81b
JN
1549 pp_stat_reg = _pp_stat_reg(intel_dp);
1550 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1551
99ea7127 1552 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1553 mask, value,
1554 I915_READ(pp_stat_reg),
1555 I915_READ(pp_ctrl_reg));
32ce697c 1556
453c5420 1557 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1558 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1559 I915_READ(pp_stat_reg),
1560 I915_READ(pp_ctrl_reg));
32ce697c 1561 }
54c136d4
CW
1562
1563 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1564}
32ce697c 1565
4be73780 1566static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1567{
1568 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1569 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1570}
1571
4be73780 1572static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1573{
1574 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1575 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1576}
1577
4be73780 1578static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1579{
1580 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1581
1582 /* When we disable the VDD override bit last we have to do the manual
1583 * wait. */
1584 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1585 intel_dp->panel_power_cycle_delay);
1586
4be73780 1587 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1588}
1589
4be73780 1590static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1591{
1592 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1593 intel_dp->backlight_on_delay);
1594}
1595
4be73780 1596static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1597{
1598 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1599 intel_dp->backlight_off_delay);
1600}
99ea7127 1601
832dd3c1
KP
1602/* Read the current pp_control value, unlocking the register if it
1603 * is locked
1604 */
1605
453c5420 1606static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1607{
453c5420
JB
1608 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1609 struct drm_i915_private *dev_priv = dev->dev_private;
1610 u32 control;
832dd3c1 1611
e39b999a
VS
1612 lockdep_assert_held(&dev_priv->pps_mutex);
1613
bf13e81b 1614 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1615 control &= ~PANEL_UNLOCK_MASK;
1616 control |= PANEL_UNLOCK_REGS;
1617 return control;
bd943159
KP
1618}
1619
951468f3
VS
1620/*
1621 * Must be paired with edp_panel_vdd_off().
1622 * Must hold pps_mutex around the whole on/off sequence.
1623 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1624 */
1e0560e0 1625static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1626{
30add22d 1627 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1628 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1629 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1630 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1631 enum intel_display_power_domain power_domain;
5d613501 1632 u32 pp;
453c5420 1633 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1634 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1635
e39b999a
VS
1636 lockdep_assert_held(&dev_priv->pps_mutex);
1637
97af61f5 1638 if (!is_edp(intel_dp))
adddaaf4 1639 return false;
bd943159 1640
2c623c11 1641 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1642 intel_dp->want_panel_vdd = true;
99ea7127 1643
4be73780 1644 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1645 return need_to_disable;
b0665d57 1646
4e6e1a54
ID
1647 power_domain = intel_display_port_power_domain(intel_encoder);
1648 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1649
3936fcf4
VS
1650 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1651 port_name(intel_dig_port->port));
bd943159 1652
4be73780
DV
1653 if (!edp_have_panel_power(intel_dp))
1654 wait_panel_power_cycle(intel_dp);
99ea7127 1655
453c5420 1656 pp = ironlake_get_pp_control(intel_dp);
5d613501 1657 pp |= EDP_FORCE_VDD;
ebf33b18 1658
bf13e81b
JN
1659 pp_stat_reg = _pp_stat_reg(intel_dp);
1660 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1661
1662 I915_WRITE(pp_ctrl_reg, pp);
1663 POSTING_READ(pp_ctrl_reg);
1664 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1665 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1666 /*
1667 * If the panel wasn't on, delay before accessing aux channel
1668 */
4be73780 1669 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1670 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1671 port_name(intel_dig_port->port));
f01eca2e 1672 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1673 }
adddaaf4
JN
1674
1675 return need_to_disable;
1676}
1677
951468f3
VS
1678/*
1679 * Must be paired with intel_edp_panel_vdd_off() or
1680 * intel_edp_panel_off().
1681 * Nested calls to these functions are not allowed since
1682 * we drop the lock. Caller must use some higher level
1683 * locking to prevent nested calls from other threads.
1684 */
b80d6c78 1685void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1686{
c695b6b6 1687 bool vdd;
adddaaf4 1688
c695b6b6
VS
1689 if (!is_edp(intel_dp))
1690 return;
1691
773538e8 1692 pps_lock(intel_dp);
c695b6b6 1693 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1694 pps_unlock(intel_dp);
c695b6b6 1695
e2c719b7 1696 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1697 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1698}
1699
4be73780 1700static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1701{
30add22d 1702 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1703 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1704 struct intel_digital_port *intel_dig_port =
1705 dp_to_dig_port(intel_dp);
1706 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1707 enum intel_display_power_domain power_domain;
5d613501 1708 u32 pp;
453c5420 1709 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1710
e39b999a 1711 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1712
15e899a0 1713 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1714
15e899a0 1715 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1716 return;
b0665d57 1717
3936fcf4
VS
1718 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1719 port_name(intel_dig_port->port));
bd943159 1720
be2c9196
VS
1721 pp = ironlake_get_pp_control(intel_dp);
1722 pp &= ~EDP_FORCE_VDD;
453c5420 1723
be2c9196
VS
1724 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1725 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1726
be2c9196
VS
1727 I915_WRITE(pp_ctrl_reg, pp);
1728 POSTING_READ(pp_ctrl_reg);
90791a5c 1729
be2c9196
VS
1730 /* Make sure sequencer is idle before allowing subsequent activity */
1731 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1732 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1733
be2c9196
VS
1734 if ((pp & POWER_TARGET_ON) == 0)
1735 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1736
be2c9196
VS
1737 power_domain = intel_display_port_power_domain(intel_encoder);
1738 intel_display_power_put(dev_priv, power_domain);
bd943159 1739}
5d613501 1740
4be73780 1741static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1742{
1743 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1744 struct intel_dp, panel_vdd_work);
bd943159 1745
773538e8 1746 pps_lock(intel_dp);
15e899a0
VS
1747 if (!intel_dp->want_panel_vdd)
1748 edp_panel_vdd_off_sync(intel_dp);
773538e8 1749 pps_unlock(intel_dp);
bd943159
KP
1750}
1751
aba86890
ID
1752static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1753{
1754 unsigned long delay;
1755
1756 /*
1757 * Queue the timer to fire a long time from now (relative to the power
1758 * down delay) to keep the panel power up across a sequence of
1759 * operations.
1760 */
1761 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1762 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1763}
1764
951468f3
VS
1765/*
1766 * Must be paired with edp_panel_vdd_on().
1767 * Must hold pps_mutex around the whole on/off sequence.
1768 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1769 */
4be73780 1770static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1771{
e39b999a
VS
1772 struct drm_i915_private *dev_priv =
1773 intel_dp_to_dev(intel_dp)->dev_private;
1774
1775 lockdep_assert_held(&dev_priv->pps_mutex);
1776
97af61f5
KP
1777 if (!is_edp(intel_dp))
1778 return;
5d613501 1779
e2c719b7 1780 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1781 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1782
bd943159
KP
1783 intel_dp->want_panel_vdd = false;
1784
aba86890 1785 if (sync)
4be73780 1786 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1787 else
1788 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1789}
1790
9f0fb5be 1791static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1792{
30add22d 1793 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1794 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1795 u32 pp;
453c5420 1796 u32 pp_ctrl_reg;
9934c132 1797
9f0fb5be
VS
1798 lockdep_assert_held(&dev_priv->pps_mutex);
1799
97af61f5 1800 if (!is_edp(intel_dp))
bd943159 1801 return;
99ea7127 1802
3936fcf4
VS
1803 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1804 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1805
e7a89ace
VS
1806 if (WARN(edp_have_panel_power(intel_dp),
1807 "eDP port %c panel power already on\n",
1808 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1809 return;
9934c132 1810
4be73780 1811 wait_panel_power_cycle(intel_dp);
37c6c9b0 1812
bf13e81b 1813 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1814 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1815 if (IS_GEN5(dev)) {
1816 /* ILK workaround: disable reset around power sequence */
1817 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1818 I915_WRITE(pp_ctrl_reg, pp);
1819 POSTING_READ(pp_ctrl_reg);
05ce1a49 1820 }
37c6c9b0 1821
1c0ae80a 1822 pp |= POWER_TARGET_ON;
99ea7127
KP
1823 if (!IS_GEN5(dev))
1824 pp |= PANEL_POWER_RESET;
1825
453c5420
JB
1826 I915_WRITE(pp_ctrl_reg, pp);
1827 POSTING_READ(pp_ctrl_reg);
9934c132 1828
4be73780 1829 wait_panel_on(intel_dp);
dce56b3c 1830 intel_dp->last_power_on = jiffies;
9934c132 1831
05ce1a49
KP
1832 if (IS_GEN5(dev)) {
1833 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1834 I915_WRITE(pp_ctrl_reg, pp);
1835 POSTING_READ(pp_ctrl_reg);
05ce1a49 1836 }
9f0fb5be 1837}
e39b999a 1838
9f0fb5be
VS
1839void intel_edp_panel_on(struct intel_dp *intel_dp)
1840{
1841 if (!is_edp(intel_dp))
1842 return;
1843
1844 pps_lock(intel_dp);
1845 edp_panel_on(intel_dp);
773538e8 1846 pps_unlock(intel_dp);
9934c132
JB
1847}
1848
9f0fb5be
VS
1849
1850static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1851{
4e6e1a54
ID
1852 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1853 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1854 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1855 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1856 enum intel_display_power_domain power_domain;
99ea7127 1857 u32 pp;
453c5420 1858 u32 pp_ctrl_reg;
9934c132 1859
9f0fb5be
VS
1860 lockdep_assert_held(&dev_priv->pps_mutex);
1861
97af61f5
KP
1862 if (!is_edp(intel_dp))
1863 return;
37c6c9b0 1864
3936fcf4
VS
1865 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1866 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1867
3936fcf4
VS
1868 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1869 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1870
453c5420 1871 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1872 /* We need to switch off panel power _and_ force vdd, for otherwise some
1873 * panels get very unhappy and cease to work. */
b3064154
PJ
1874 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1875 EDP_BLC_ENABLE);
453c5420 1876
bf13e81b 1877 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1878
849e39f5
PZ
1879 intel_dp->want_panel_vdd = false;
1880
453c5420
JB
1881 I915_WRITE(pp_ctrl_reg, pp);
1882 POSTING_READ(pp_ctrl_reg);
9934c132 1883
dce56b3c 1884 intel_dp->last_power_cycle = jiffies;
4be73780 1885 wait_panel_off(intel_dp);
849e39f5
PZ
1886
1887 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1888 power_domain = intel_display_port_power_domain(intel_encoder);
1889 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1890}
e39b999a 1891
9f0fb5be
VS
1892void intel_edp_panel_off(struct intel_dp *intel_dp)
1893{
1894 if (!is_edp(intel_dp))
1895 return;
e39b999a 1896
9f0fb5be
VS
1897 pps_lock(intel_dp);
1898 edp_panel_off(intel_dp);
773538e8 1899 pps_unlock(intel_dp);
9934c132
JB
1900}
1901
1250d107
JN
1902/* Enable backlight in the panel power control. */
1903static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1904{
da63a9f2
PZ
1905 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1906 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1907 struct drm_i915_private *dev_priv = dev->dev_private;
1908 u32 pp;
453c5420 1909 u32 pp_ctrl_reg;
32f9d658 1910
01cb9ea6
JB
1911 /*
1912 * If we enable the backlight right away following a panel power
1913 * on, we may see slight flicker as the panel syncs with the eDP
1914 * link. So delay a bit to make sure the image is solid before
1915 * allowing it to appear.
1916 */
4be73780 1917 wait_backlight_on(intel_dp);
e39b999a 1918
773538e8 1919 pps_lock(intel_dp);
e39b999a 1920
453c5420 1921 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1922 pp |= EDP_BLC_ENABLE;
453c5420 1923
bf13e81b 1924 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1925
1926 I915_WRITE(pp_ctrl_reg, pp);
1927 POSTING_READ(pp_ctrl_reg);
e39b999a 1928
773538e8 1929 pps_unlock(intel_dp);
32f9d658
ZW
1930}
1931
1250d107
JN
1932/* Enable backlight PWM and backlight PP control. */
1933void intel_edp_backlight_on(struct intel_dp *intel_dp)
1934{
1935 if (!is_edp(intel_dp))
1936 return;
1937
1938 DRM_DEBUG_KMS("\n");
1939
1940 intel_panel_enable_backlight(intel_dp->attached_connector);
1941 _intel_edp_backlight_on(intel_dp);
1942}
1943
1944/* Disable backlight in the panel power control. */
1945static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1946{
30add22d 1947 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1948 struct drm_i915_private *dev_priv = dev->dev_private;
1949 u32 pp;
453c5420 1950 u32 pp_ctrl_reg;
32f9d658 1951
f01eca2e
KP
1952 if (!is_edp(intel_dp))
1953 return;
1954
773538e8 1955 pps_lock(intel_dp);
e39b999a 1956
453c5420 1957 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1958 pp &= ~EDP_BLC_ENABLE;
453c5420 1959
bf13e81b 1960 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1961
1962 I915_WRITE(pp_ctrl_reg, pp);
1963 POSTING_READ(pp_ctrl_reg);
f7d2323c 1964
773538e8 1965 pps_unlock(intel_dp);
e39b999a
VS
1966
1967 intel_dp->last_backlight_off = jiffies;
f7d2323c 1968 edp_wait_backlight_off(intel_dp);
1250d107 1969}
f7d2323c 1970
1250d107
JN
1971/* Disable backlight PP control and backlight PWM. */
1972void intel_edp_backlight_off(struct intel_dp *intel_dp)
1973{
1974 if (!is_edp(intel_dp))
1975 return;
1976
1977 DRM_DEBUG_KMS("\n");
f7d2323c 1978
1250d107 1979 _intel_edp_backlight_off(intel_dp);
f7d2323c 1980 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1981}
a4fc5ed6 1982
73580fb7
JN
1983/*
1984 * Hook for controlling the panel power control backlight through the bl_power
1985 * sysfs attribute. Take care to handle multiple calls.
1986 */
1987static void intel_edp_backlight_power(struct intel_connector *connector,
1988 bool enable)
1989{
1990 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1991 bool is_enabled;
1992
773538e8 1993 pps_lock(intel_dp);
e39b999a 1994 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1995 pps_unlock(intel_dp);
73580fb7
JN
1996
1997 if (is_enabled == enable)
1998 return;
1999
23ba9373
JN
2000 DRM_DEBUG_KMS("panel power control backlight %s\n",
2001 enable ? "enable" : "disable");
73580fb7
JN
2002
2003 if (enable)
2004 _intel_edp_backlight_on(intel_dp);
2005 else
2006 _intel_edp_backlight_off(intel_dp);
2007}
2008
2bd2ad64 2009static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2010{
da63a9f2
PZ
2011 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2012 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2013 struct drm_device *dev = crtc->dev;
d240f20f
JB
2014 struct drm_i915_private *dev_priv = dev->dev_private;
2015 u32 dpa_ctl;
2016
2bd2ad64
DV
2017 assert_pipe_disabled(dev_priv,
2018 to_intel_crtc(crtc)->pipe);
2019
d240f20f
JB
2020 DRM_DEBUG_KMS("\n");
2021 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2022 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2023 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2024
2025 /* We don't adjust intel_dp->DP while tearing down the link, to
2026 * facilitate link retraining (e.g. after hotplug). Hence clear all
2027 * enable bits here to ensure that we don't enable too much. */
2028 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2029 intel_dp->DP |= DP_PLL_ENABLE;
2030 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2031 POSTING_READ(DP_A);
2032 udelay(200);
d240f20f
JB
2033}
2034
2bd2ad64 2035static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2036{
da63a9f2
PZ
2037 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2038 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2039 struct drm_device *dev = crtc->dev;
d240f20f
JB
2040 struct drm_i915_private *dev_priv = dev->dev_private;
2041 u32 dpa_ctl;
2042
2bd2ad64
DV
2043 assert_pipe_disabled(dev_priv,
2044 to_intel_crtc(crtc)->pipe);
2045
d240f20f 2046 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2047 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2048 "dp pll off, should be on\n");
2049 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2050
2051 /* We can't rely on the value tracked for the DP register in
2052 * intel_dp->DP because link_down must not change that (otherwise link
2053 * re-training will fail. */
298b0b39 2054 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2055 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2056 POSTING_READ(DP_A);
d240f20f
JB
2057 udelay(200);
2058}
2059
c7ad3810 2060/* If the sink supports it, try to set the power state appropriately */
c19b0669 2061void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2062{
2063 int ret, i;
2064
2065 /* Should have a valid DPCD by this point */
2066 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2067 return;
2068
2069 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2070 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2071 DP_SET_POWER_D3);
c7ad3810
JB
2072 } else {
2073 /*
2074 * When turning on, we need to retry for 1ms to give the sink
2075 * time to wake up.
2076 */
2077 for (i = 0; i < 3; i++) {
9d1a1031
JN
2078 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2079 DP_SET_POWER_D0);
c7ad3810
JB
2080 if (ret == 1)
2081 break;
2082 msleep(1);
2083 }
2084 }
f9cac721
JN
2085
2086 if (ret != 1)
2087 DRM_DEBUG_KMS("failed to %s sink power state\n",
2088 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2089}
2090
19d8fe15
DV
2091static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2092 enum pipe *pipe)
d240f20f 2093{
19d8fe15 2094 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2095 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2096 struct drm_device *dev = encoder->base.dev;
2097 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2098 enum intel_display_power_domain power_domain;
2099 u32 tmp;
2100
2101 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2102 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2103 return false;
2104
2105 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2106
2107 if (!(tmp & DP_PORT_EN))
2108 return false;
2109
bc7d38a4 2110 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2111 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2112 } else if (IS_CHERRYVIEW(dev)) {
2113 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2114 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2115 *pipe = PORT_TO_PIPE(tmp);
2116 } else {
2117 u32 trans_sel;
2118 u32 trans_dp;
2119 int i;
2120
2121 switch (intel_dp->output_reg) {
2122 case PCH_DP_B:
2123 trans_sel = TRANS_DP_PORT_SEL_B;
2124 break;
2125 case PCH_DP_C:
2126 trans_sel = TRANS_DP_PORT_SEL_C;
2127 break;
2128 case PCH_DP_D:
2129 trans_sel = TRANS_DP_PORT_SEL_D;
2130 break;
2131 default:
2132 return true;
2133 }
2134
055e393f 2135 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2136 trans_dp = I915_READ(TRANS_DP_CTL(i));
2137 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2138 *pipe = i;
2139 return true;
2140 }
2141 }
19d8fe15 2142
4a0833ec
DV
2143 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2144 intel_dp->output_reg);
2145 }
d240f20f 2146
19d8fe15
DV
2147 return true;
2148}
d240f20f 2149
045ac3b5 2150static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2151 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2152{
2153 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2154 u32 tmp, flags = 0;
63000ef6
XZ
2155 struct drm_device *dev = encoder->base.dev;
2156 struct drm_i915_private *dev_priv = dev->dev_private;
2157 enum port port = dp_to_dig_port(intel_dp)->port;
2158 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2159 int dotclock;
045ac3b5 2160
9ed109a7
DV
2161 tmp = I915_READ(intel_dp->output_reg);
2162 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2163 pipe_config->has_audio = true;
2164
63000ef6 2165 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2166 if (tmp & DP_SYNC_HS_HIGH)
2167 flags |= DRM_MODE_FLAG_PHSYNC;
2168 else
2169 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2170
63000ef6
XZ
2171 if (tmp & DP_SYNC_VS_HIGH)
2172 flags |= DRM_MODE_FLAG_PVSYNC;
2173 else
2174 flags |= DRM_MODE_FLAG_NVSYNC;
2175 } else {
2176 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2177 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2178 flags |= DRM_MODE_FLAG_PHSYNC;
2179 else
2180 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2181
63000ef6
XZ
2182 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2183 flags |= DRM_MODE_FLAG_PVSYNC;
2184 else
2185 flags |= DRM_MODE_FLAG_NVSYNC;
2186 }
045ac3b5 2187
2d112de7 2188 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2189
8c875fca
VS
2190 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2191 tmp & DP_COLOR_RANGE_16_235)
2192 pipe_config->limited_color_range = true;
2193
eb14cb74
VS
2194 pipe_config->has_dp_encoder = true;
2195
2196 intel_dp_get_m_n(crtc, pipe_config);
2197
18442d08 2198 if (port == PORT_A) {
f1f644dc
JB
2199 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2200 pipe_config->port_clock = 162000;
2201 else
2202 pipe_config->port_clock = 270000;
2203 }
18442d08
VS
2204
2205 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2206 &pipe_config->dp_m_n);
2207
2208 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2209 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2210
2d112de7 2211 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2212
c6cd2ee2
JN
2213 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2214 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2215 /*
2216 * This is a big fat ugly hack.
2217 *
2218 * Some machines in UEFI boot mode provide us a VBT that has 18
2219 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2220 * unknown we fail to light up. Yet the same BIOS boots up with
2221 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2222 * max, not what it tells us to use.
2223 *
2224 * Note: This will still be broken if the eDP panel is not lit
2225 * up by the BIOS, and thus we can't get the mode at module
2226 * load.
2227 */
2228 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2229 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2230 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2231 }
045ac3b5
JB
2232}
2233
e8cb4558 2234static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2235{
e8cb4558 2236 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2237 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2238 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2239
6e3c9717 2240 if (crtc->config->has_audio)
495a5bb8 2241 intel_audio_codec_disable(encoder);
6cb49835 2242
b32c6f48
RV
2243 if (HAS_PSR(dev) && !HAS_DDI(dev))
2244 intel_psr_disable(intel_dp);
2245
6cb49835
DV
2246 /* Make sure the panel is off before trying to change the mode. But also
2247 * ensure that we have vdd while we switch off the panel. */
24f3e092 2248 intel_edp_panel_vdd_on(intel_dp);
4be73780 2249 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2250 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2251 intel_edp_panel_off(intel_dp);
3739850b 2252
08aff3fe
VS
2253 /* disable the port before the pipe on g4x */
2254 if (INTEL_INFO(dev)->gen < 5)
3739850b 2255 intel_dp_link_down(intel_dp);
d240f20f
JB
2256}
2257
08aff3fe 2258static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2259{
2bd2ad64 2260 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2261 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2262
49277c31 2263 intel_dp_link_down(intel_dp);
08aff3fe
VS
2264 if (port == PORT_A)
2265 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2266}
2267
2268static void vlv_post_disable_dp(struct intel_encoder *encoder)
2269{
2270 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2271
2272 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2273}
2274
580d3811
VS
2275static void chv_post_disable_dp(struct intel_encoder *encoder)
2276{
2277 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2278 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2279 struct drm_device *dev = encoder->base.dev;
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 struct intel_crtc *intel_crtc =
2282 to_intel_crtc(encoder->base.crtc);
2283 enum dpio_channel ch = vlv_dport_to_channel(dport);
2284 enum pipe pipe = intel_crtc->pipe;
2285 u32 val;
2286
2287 intel_dp_link_down(intel_dp);
2288
2289 mutex_lock(&dev_priv->dpio_lock);
2290
2291 /* Propagate soft reset to data lane reset */
97fd4d5c 2292 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2293 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2294 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2295
97fd4d5c
VS
2296 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2297 val |= CHV_PCS_REQ_SOFTRESET_EN;
2298 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2299
2300 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2301 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2302 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2303
2304 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2305 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2306 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2307
2308 mutex_unlock(&dev_priv->dpio_lock);
2309}
2310
7b13b58a
VS
2311static void
2312_intel_dp_set_link_train(struct intel_dp *intel_dp,
2313 uint32_t *DP,
2314 uint8_t dp_train_pat)
2315{
2316 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2317 struct drm_device *dev = intel_dig_port->base.base.dev;
2318 struct drm_i915_private *dev_priv = dev->dev_private;
2319 enum port port = intel_dig_port->port;
2320
2321 if (HAS_DDI(dev)) {
2322 uint32_t temp = I915_READ(DP_TP_CTL(port));
2323
2324 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2325 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2326 else
2327 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2328
2329 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2330 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2331 case DP_TRAINING_PATTERN_DISABLE:
2332 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2333
2334 break;
2335 case DP_TRAINING_PATTERN_1:
2336 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2337 break;
2338 case DP_TRAINING_PATTERN_2:
2339 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2340 break;
2341 case DP_TRAINING_PATTERN_3:
2342 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2343 break;
2344 }
2345 I915_WRITE(DP_TP_CTL(port), temp);
2346
2347 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2348 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2349
2350 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2351 case DP_TRAINING_PATTERN_DISABLE:
2352 *DP |= DP_LINK_TRAIN_OFF_CPT;
2353 break;
2354 case DP_TRAINING_PATTERN_1:
2355 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2356 break;
2357 case DP_TRAINING_PATTERN_2:
2358 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2359 break;
2360 case DP_TRAINING_PATTERN_3:
2361 DRM_ERROR("DP training pattern 3 not supported\n");
2362 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2363 break;
2364 }
2365
2366 } else {
2367 if (IS_CHERRYVIEW(dev))
2368 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2369 else
2370 *DP &= ~DP_LINK_TRAIN_MASK;
2371
2372 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2373 case DP_TRAINING_PATTERN_DISABLE:
2374 *DP |= DP_LINK_TRAIN_OFF;
2375 break;
2376 case DP_TRAINING_PATTERN_1:
2377 *DP |= DP_LINK_TRAIN_PAT_1;
2378 break;
2379 case DP_TRAINING_PATTERN_2:
2380 *DP |= DP_LINK_TRAIN_PAT_2;
2381 break;
2382 case DP_TRAINING_PATTERN_3:
2383 if (IS_CHERRYVIEW(dev)) {
2384 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2385 } else {
2386 DRM_ERROR("DP training pattern 3 not supported\n");
2387 *DP |= DP_LINK_TRAIN_PAT_2;
2388 }
2389 break;
2390 }
2391 }
2392}
2393
2394static void intel_dp_enable_port(struct intel_dp *intel_dp)
2395{
2396 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2397 struct drm_i915_private *dev_priv = dev->dev_private;
2398
7b13b58a
VS
2399 /* enable with pattern 1 (as per spec) */
2400 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2401 DP_TRAINING_PATTERN_1);
2402
2403 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2404 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2405
2406 /*
2407 * Magic for VLV/CHV. We _must_ first set up the register
2408 * without actually enabling the port, and then do another
2409 * write to enable the port. Otherwise link training will
2410 * fail when the power sequencer is freshly used for this port.
2411 */
2412 intel_dp->DP |= DP_PORT_EN;
2413
2414 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2415 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2416}
2417
e8cb4558 2418static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2419{
e8cb4558
DV
2420 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2421 struct drm_device *dev = encoder->base.dev;
2422 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2423 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2424 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2425
0c33d8d7
DV
2426 if (WARN_ON(dp_reg & DP_PORT_EN))
2427 return;
5d613501 2428
093e3f13
VS
2429 pps_lock(intel_dp);
2430
2431 if (IS_VALLEYVIEW(dev))
2432 vlv_init_panel_power_sequencer(intel_dp);
2433
7b13b58a 2434 intel_dp_enable_port(intel_dp);
093e3f13
VS
2435
2436 edp_panel_vdd_on(intel_dp);
2437 edp_panel_on(intel_dp);
2438 edp_panel_vdd_off(intel_dp, true);
2439
2440 pps_unlock(intel_dp);
2441
61234fa5
VS
2442 if (IS_VALLEYVIEW(dev))
2443 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2444
f01eca2e 2445 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2446 intel_dp_start_link_train(intel_dp);
33a34e4e 2447 intel_dp_complete_link_train(intel_dp);
3ab9c637 2448 intel_dp_stop_link_train(intel_dp);
c1dec79a 2449
6e3c9717 2450 if (crtc->config->has_audio) {
c1dec79a
JN
2451 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2452 pipe_name(crtc->pipe));
2453 intel_audio_codec_enable(encoder);
2454 }
ab1f90f9 2455}
89b667f8 2456
ecff4f3b
JN
2457static void g4x_enable_dp(struct intel_encoder *encoder)
2458{
828f5c6e
JN
2459 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2460
ecff4f3b 2461 intel_enable_dp(encoder);
4be73780 2462 intel_edp_backlight_on(intel_dp);
ab1f90f9 2463}
89b667f8 2464
ab1f90f9
JN
2465static void vlv_enable_dp(struct intel_encoder *encoder)
2466{
828f5c6e
JN
2467 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2468
4be73780 2469 intel_edp_backlight_on(intel_dp);
b32c6f48 2470 intel_psr_enable(intel_dp);
d240f20f
JB
2471}
2472
ecff4f3b 2473static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2474{
2475 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2476 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2477
8ac33ed3
DV
2478 intel_dp_prepare(encoder);
2479
d41f1efb
DV
2480 /* Only ilk+ has port A */
2481 if (dport->port == PORT_A) {
2482 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2483 ironlake_edp_pll_on(intel_dp);
d41f1efb 2484 }
ab1f90f9
JN
2485}
2486
83b84597
VS
2487static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2488{
2489 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2490 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2491 enum pipe pipe = intel_dp->pps_pipe;
2492 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2493
2494 edp_panel_vdd_off_sync(intel_dp);
2495
2496 /*
2497 * VLV seems to get confused when multiple power seqeuencers
2498 * have the same port selected (even if only one has power/vdd
2499 * enabled). The failure manifests as vlv_wait_port_ready() failing
2500 * CHV on the other hand doesn't seem to mind having the same port
2501 * selected in multiple power seqeuencers, but let's clear the
2502 * port select always when logically disconnecting a power sequencer
2503 * from a port.
2504 */
2505 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2506 pipe_name(pipe), port_name(intel_dig_port->port));
2507 I915_WRITE(pp_on_reg, 0);
2508 POSTING_READ(pp_on_reg);
2509
2510 intel_dp->pps_pipe = INVALID_PIPE;
2511}
2512
a4a5d2f8
VS
2513static void vlv_steal_power_sequencer(struct drm_device *dev,
2514 enum pipe pipe)
2515{
2516 struct drm_i915_private *dev_priv = dev->dev_private;
2517 struct intel_encoder *encoder;
2518
2519 lockdep_assert_held(&dev_priv->pps_mutex);
2520
ac3c12e4
VS
2521 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2522 return;
2523
a4a5d2f8
VS
2524 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2525 base.head) {
2526 struct intel_dp *intel_dp;
773538e8 2527 enum port port;
a4a5d2f8
VS
2528
2529 if (encoder->type != INTEL_OUTPUT_EDP)
2530 continue;
2531
2532 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2533 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2534
2535 if (intel_dp->pps_pipe != pipe)
2536 continue;
2537
2538 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2539 pipe_name(pipe), port_name(port));
a4a5d2f8 2540
034e43c6
VS
2541 WARN(encoder->connectors_active,
2542 "stealing pipe %c power sequencer from active eDP port %c\n",
2543 pipe_name(pipe), port_name(port));
a4a5d2f8 2544
a4a5d2f8 2545 /* make sure vdd is off before we steal it */
83b84597 2546 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2547 }
2548}
2549
2550static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2551{
2552 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2553 struct intel_encoder *encoder = &intel_dig_port->base;
2554 struct drm_device *dev = encoder->base.dev;
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2556 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2557
2558 lockdep_assert_held(&dev_priv->pps_mutex);
2559
093e3f13
VS
2560 if (!is_edp(intel_dp))
2561 return;
2562
a4a5d2f8
VS
2563 if (intel_dp->pps_pipe == crtc->pipe)
2564 return;
2565
2566 /*
2567 * If another power sequencer was being used on this
2568 * port previously make sure to turn off vdd there while
2569 * we still have control of it.
2570 */
2571 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2572 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2573
2574 /*
2575 * We may be stealing the power
2576 * sequencer from another port.
2577 */
2578 vlv_steal_power_sequencer(dev, crtc->pipe);
2579
2580 /* now it's all ours */
2581 intel_dp->pps_pipe = crtc->pipe;
2582
2583 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2584 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2585
2586 /* init power sequencer on this pipe and port */
36b5f425
VS
2587 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2588 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2589}
2590
ab1f90f9 2591static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2592{
2bd2ad64 2593 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2594 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2595 struct drm_device *dev = encoder->base.dev;
89b667f8 2596 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2597 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2598 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2599 int pipe = intel_crtc->pipe;
2600 u32 val;
a4fc5ed6 2601
ab1f90f9 2602 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2603
ab3c759a 2604 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2605 val = 0;
2606 if (pipe)
2607 val |= (1<<21);
2608 else
2609 val &= ~(1<<21);
2610 val |= 0x001000c4;
ab3c759a
CML
2611 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2612 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2613 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2614
ab1f90f9
JN
2615 mutex_unlock(&dev_priv->dpio_lock);
2616
2617 intel_enable_dp(encoder);
89b667f8
JB
2618}
2619
ecff4f3b 2620static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2621{
2622 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2623 struct drm_device *dev = encoder->base.dev;
2624 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2625 struct intel_crtc *intel_crtc =
2626 to_intel_crtc(encoder->base.crtc);
e4607fcf 2627 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2628 int pipe = intel_crtc->pipe;
89b667f8 2629
8ac33ed3
DV
2630 intel_dp_prepare(encoder);
2631
89b667f8 2632 /* Program Tx lane resets to default */
0980a60f 2633 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2634 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2635 DPIO_PCS_TX_LANE2_RESET |
2636 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2637 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2638 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2639 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2640 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2641 DPIO_PCS_CLK_SOFT_RESET);
2642
2643 /* Fix up inter-pair skew failure */
ab3c759a
CML
2644 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2645 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2646 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2647 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2648}
2649
e4a1d846
CML
2650static void chv_pre_enable_dp(struct intel_encoder *encoder)
2651{
2652 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2653 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2654 struct drm_device *dev = encoder->base.dev;
2655 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2656 struct intel_crtc *intel_crtc =
2657 to_intel_crtc(encoder->base.crtc);
2658 enum dpio_channel ch = vlv_dport_to_channel(dport);
2659 int pipe = intel_crtc->pipe;
2660 int data, i;
949c1d43 2661 u32 val;
e4a1d846 2662
e4a1d846 2663 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2664
570e2a74
VS
2665 /* allow hardware to manage TX FIFO reset source */
2666 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2667 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2668 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2669
2670 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2671 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2672 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2673
949c1d43 2674 /* Deassert soft data lane reset*/
97fd4d5c 2675 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2676 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2677 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2678
2679 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2680 val |= CHV_PCS_REQ_SOFTRESET_EN;
2681 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2682
2683 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2684 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2685 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2686
97fd4d5c 2687 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2688 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2690
2691 /* Program Tx lane latency optimal setting*/
e4a1d846
CML
2692 for (i = 0; i < 4; i++) {
2693 /* Set the latency optimal bit */
2694 data = (i == 1) ? 0x0 : 0x6;
2695 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2696 data << DPIO_FRC_LATENCY_SHFIT);
2697
2698 /* Set the upar bit */
2699 data = (i == 1) ? 0x0 : 0x1;
2700 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2701 data << DPIO_UPAR_SHIFT);
2702 }
2703
2704 /* Data lane stagger programming */
2705 /* FIXME: Fix up value only after power analysis */
2706
2707 mutex_unlock(&dev_priv->dpio_lock);
2708
e4a1d846 2709 intel_enable_dp(encoder);
e4a1d846
CML
2710}
2711
9197c88b
VS
2712static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2713{
2714 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2715 struct drm_device *dev = encoder->base.dev;
2716 struct drm_i915_private *dev_priv = dev->dev_private;
2717 struct intel_crtc *intel_crtc =
2718 to_intel_crtc(encoder->base.crtc);
2719 enum dpio_channel ch = vlv_dport_to_channel(dport);
2720 enum pipe pipe = intel_crtc->pipe;
2721 u32 val;
2722
625695f8
VS
2723 intel_dp_prepare(encoder);
2724
9197c88b
VS
2725 mutex_lock(&dev_priv->dpio_lock);
2726
b9e5ac3c
VS
2727 /* program left/right clock distribution */
2728 if (pipe != PIPE_B) {
2729 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2730 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2731 if (ch == DPIO_CH0)
2732 val |= CHV_BUFLEFTENA1_FORCE;
2733 if (ch == DPIO_CH1)
2734 val |= CHV_BUFRIGHTENA1_FORCE;
2735 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2736 } else {
2737 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2738 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2739 if (ch == DPIO_CH0)
2740 val |= CHV_BUFLEFTENA2_FORCE;
2741 if (ch == DPIO_CH1)
2742 val |= CHV_BUFRIGHTENA2_FORCE;
2743 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2744 }
2745
9197c88b
VS
2746 /* program clock channel usage */
2747 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2748 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2749 if (pipe != PIPE_B)
2750 val &= ~CHV_PCS_USEDCLKCHANNEL;
2751 else
2752 val |= CHV_PCS_USEDCLKCHANNEL;
2753 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2754
2755 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2756 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2757 if (pipe != PIPE_B)
2758 val &= ~CHV_PCS_USEDCLKCHANNEL;
2759 else
2760 val |= CHV_PCS_USEDCLKCHANNEL;
2761 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2762
2763 /*
2764 * This a a bit weird since generally CL
2765 * matches the pipe, but here we need to
2766 * pick the CL based on the port.
2767 */
2768 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2769 if (pipe != PIPE_B)
2770 val &= ~CHV_CMN_USEDCLKCHANNEL;
2771 else
2772 val |= CHV_CMN_USEDCLKCHANNEL;
2773 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2774
2775 mutex_unlock(&dev_priv->dpio_lock);
2776}
2777
a4fc5ed6 2778/*
df0c237d
JB
2779 * Native read with retry for link status and receiver capability reads for
2780 * cases where the sink may still be asleep.
9d1a1031
JN
2781 *
2782 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2783 * supposed to retry 3 times per the spec.
a4fc5ed6 2784 */
9d1a1031
JN
2785static ssize_t
2786intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2787 void *buffer, size_t size)
a4fc5ed6 2788{
9d1a1031
JN
2789 ssize_t ret;
2790 int i;
61da5fab 2791
f6a19066
VS
2792 /*
2793 * Sometime we just get the same incorrect byte repeated
2794 * over the entire buffer. Doing just one throw away read
2795 * initially seems to "solve" it.
2796 */
2797 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2798
61da5fab 2799 for (i = 0; i < 3; i++) {
9d1a1031
JN
2800 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2801 if (ret == size)
2802 return ret;
61da5fab
JB
2803 msleep(1);
2804 }
a4fc5ed6 2805
9d1a1031 2806 return ret;
a4fc5ed6
KP
2807}
2808
2809/*
2810 * Fetch AUX CH registers 0x202 - 0x207 which contain
2811 * link status information
2812 */
2813static bool
93f62dad 2814intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2815{
9d1a1031
JN
2816 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2817 DP_LANE0_1_STATUS,
2818 link_status,
2819 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2820}
2821
1100244e 2822/* These are source-specific values. */
a4fc5ed6 2823static uint8_t
1a2eb460 2824intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2825{
30add22d 2826 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2827 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2828 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2829
7ad14a29
SJ
2830 if (INTEL_INFO(dev)->gen >= 9) {
2831 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2832 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2833 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2834 } else if (IS_VALLEYVIEW(dev))
bd60018a 2835 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2836 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2837 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2838 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2839 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2840 else
bd60018a 2841 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2842}
2843
2844static uint8_t
2845intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2846{
30add22d 2847 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2848 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2849
5a9d1f1a
DL
2850 if (INTEL_INFO(dev)->gen >= 9) {
2851 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2853 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2855 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2857 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2859 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2860 default:
2861 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2862 }
2863 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2864 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2865 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2866 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2867 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2868 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2869 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2870 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2872 default:
bd60018a 2873 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2874 }
e2fa6fba
P
2875 } else if (IS_VALLEYVIEW(dev)) {
2876 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2878 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2879 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2880 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2882 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2884 default:
bd60018a 2885 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2886 }
bc7d38a4 2887 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2888 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2890 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2891 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2892 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2893 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2894 default:
bd60018a 2895 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2896 }
2897 } else {
2898 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2900 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2901 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2902 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2904 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2906 default:
bd60018a 2907 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2908 }
a4fc5ed6
KP
2909 }
2910}
2911
e2fa6fba
P
2912static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2913{
2914 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2915 struct drm_i915_private *dev_priv = dev->dev_private;
2916 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2917 struct intel_crtc *intel_crtc =
2918 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2919 unsigned long demph_reg_value, preemph_reg_value,
2920 uniqtranscale_reg_value;
2921 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2922 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2923 int pipe = intel_crtc->pipe;
e2fa6fba
P
2924
2925 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2926 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2927 preemph_reg_value = 0x0004000;
2928 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2930 demph_reg_value = 0x2B405555;
2931 uniqtranscale_reg_value = 0x552AB83A;
2932 break;
bd60018a 2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2934 demph_reg_value = 0x2B404040;
2935 uniqtranscale_reg_value = 0x5548B83A;
2936 break;
bd60018a 2937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2938 demph_reg_value = 0x2B245555;
2939 uniqtranscale_reg_value = 0x5560B83A;
2940 break;
bd60018a 2941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2942 demph_reg_value = 0x2B405555;
2943 uniqtranscale_reg_value = 0x5598DA3A;
2944 break;
2945 default:
2946 return 0;
2947 }
2948 break;
bd60018a 2949 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2950 preemph_reg_value = 0x0002000;
2951 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2953 demph_reg_value = 0x2B404040;
2954 uniqtranscale_reg_value = 0x5552B83A;
2955 break;
bd60018a 2956 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2957 demph_reg_value = 0x2B404848;
2958 uniqtranscale_reg_value = 0x5580B83A;
2959 break;
bd60018a 2960 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2961 demph_reg_value = 0x2B404040;
2962 uniqtranscale_reg_value = 0x55ADDA3A;
2963 break;
2964 default:
2965 return 0;
2966 }
2967 break;
bd60018a 2968 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2969 preemph_reg_value = 0x0000000;
2970 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2971 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2972 demph_reg_value = 0x2B305555;
2973 uniqtranscale_reg_value = 0x5570B83A;
2974 break;
bd60018a 2975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2976 demph_reg_value = 0x2B2B4040;
2977 uniqtranscale_reg_value = 0x55ADDA3A;
2978 break;
2979 default:
2980 return 0;
2981 }
2982 break;
bd60018a 2983 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2984 preemph_reg_value = 0x0006000;
2985 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2986 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2987 demph_reg_value = 0x1B405555;
2988 uniqtranscale_reg_value = 0x55ADDA3A;
2989 break;
2990 default:
2991 return 0;
2992 }
2993 break;
2994 default:
2995 return 0;
2996 }
2997
0980a60f 2998 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2999 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3000 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3001 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3002 uniqtranscale_reg_value);
ab3c759a
CML
3003 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3004 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3005 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3006 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3007 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3008
3009 return 0;
3010}
3011
e4a1d846
CML
3012static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3013{
3014 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3017 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3018 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3019 uint8_t train_set = intel_dp->train_set[0];
3020 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3021 enum pipe pipe = intel_crtc->pipe;
3022 int i;
e4a1d846
CML
3023
3024 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3025 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3026 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3028 deemph_reg_value = 128;
3029 margin_reg_value = 52;
3030 break;
bd60018a 3031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3032 deemph_reg_value = 128;
3033 margin_reg_value = 77;
3034 break;
bd60018a 3035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3036 deemph_reg_value = 128;
3037 margin_reg_value = 102;
3038 break;
bd60018a 3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3040 deemph_reg_value = 128;
3041 margin_reg_value = 154;
3042 /* FIXME extra to set for 1200 */
3043 break;
3044 default:
3045 return 0;
3046 }
3047 break;
bd60018a 3048 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3049 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3051 deemph_reg_value = 85;
3052 margin_reg_value = 78;
3053 break;
bd60018a 3054 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3055 deemph_reg_value = 85;
3056 margin_reg_value = 116;
3057 break;
bd60018a 3058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3059 deemph_reg_value = 85;
3060 margin_reg_value = 154;
3061 break;
3062 default:
3063 return 0;
3064 }
3065 break;
bd60018a 3066 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3067 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3069 deemph_reg_value = 64;
3070 margin_reg_value = 104;
3071 break;
bd60018a 3072 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3073 deemph_reg_value = 64;
3074 margin_reg_value = 154;
3075 break;
3076 default:
3077 return 0;
3078 }
3079 break;
bd60018a 3080 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3081 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3083 deemph_reg_value = 43;
3084 margin_reg_value = 154;
3085 break;
3086 default:
3087 return 0;
3088 }
3089 break;
3090 default:
3091 return 0;
3092 }
3093
3094 mutex_lock(&dev_priv->dpio_lock);
3095
3096 /* Clear calc init */
1966e59e
VS
3097 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3098 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3099 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3100 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3101 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3102
3103 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3104 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3105 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3106 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3107 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3108
a02ef3c7
VS
3109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3110 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3111 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3112 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3113
3114 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3115 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3116 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3117 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3118
e4a1d846 3119 /* Program swing deemph */
f72df8db
VS
3120 for (i = 0; i < 4; i++) {
3121 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3122 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3123 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3124 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3125 }
e4a1d846
CML
3126
3127 /* Program swing margin */
f72df8db
VS
3128 for (i = 0; i < 4; i++) {
3129 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3130 val &= ~DPIO_SWING_MARGIN000_MASK;
3131 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3132 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3133 }
e4a1d846
CML
3134
3135 /* Disable unique transition scale */
f72df8db
VS
3136 for (i = 0; i < 4; i++) {
3137 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3138 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3139 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3140 }
e4a1d846
CML
3141
3142 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3143 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3144 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3145 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3146
3147 /*
3148 * The document said it needs to set bit 27 for ch0 and bit 26
3149 * for ch1. Might be a typo in the doc.
3150 * For now, for this unique transition scale selection, set bit
3151 * 27 for ch0 and ch1.
3152 */
f72df8db
VS
3153 for (i = 0; i < 4; i++) {
3154 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3155 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3156 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3157 }
e4a1d846 3158
f72df8db
VS
3159 for (i = 0; i < 4; i++) {
3160 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3161 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3162 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3163 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3164 }
e4a1d846
CML
3165 }
3166
3167 /* Start swing calculation */
1966e59e
VS
3168 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3169 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3170 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3171
3172 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3173 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3174 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3175
3176 /* LRC Bypass */
3177 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3178 val |= DPIO_LRC_BYPASS;
3179 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3180
3181 mutex_unlock(&dev_priv->dpio_lock);
3182
3183 return 0;
3184}
3185
a4fc5ed6 3186static void
0301b3ac
JN
3187intel_get_adjust_train(struct intel_dp *intel_dp,
3188 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3189{
3190 uint8_t v = 0;
3191 uint8_t p = 0;
3192 int lane;
1a2eb460
KP
3193 uint8_t voltage_max;
3194 uint8_t preemph_max;
a4fc5ed6 3195
33a34e4e 3196 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3197 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3198 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3199
3200 if (this_v > v)
3201 v = this_v;
3202 if (this_p > p)
3203 p = this_p;
3204 }
3205
1a2eb460 3206 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3207 if (v >= voltage_max)
3208 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3209
1a2eb460
KP
3210 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3211 if (p >= preemph_max)
3212 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3213
3214 for (lane = 0; lane < 4; lane++)
33a34e4e 3215 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3216}
3217
3218static uint32_t
f0a3424e 3219intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3220{
3cf2efb1 3221 uint32_t signal_levels = 0;
a4fc5ed6 3222
3cf2efb1 3223 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3225 default:
3226 signal_levels |= DP_VOLTAGE_0_4;
3227 break;
bd60018a 3228 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3229 signal_levels |= DP_VOLTAGE_0_6;
3230 break;
bd60018a 3231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3232 signal_levels |= DP_VOLTAGE_0_8;
3233 break;
bd60018a 3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3235 signal_levels |= DP_VOLTAGE_1_2;
3236 break;
3237 }
3cf2efb1 3238 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3239 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3240 default:
3241 signal_levels |= DP_PRE_EMPHASIS_0;
3242 break;
bd60018a 3243 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3244 signal_levels |= DP_PRE_EMPHASIS_3_5;
3245 break;
bd60018a 3246 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3247 signal_levels |= DP_PRE_EMPHASIS_6;
3248 break;
bd60018a 3249 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3250 signal_levels |= DP_PRE_EMPHASIS_9_5;
3251 break;
3252 }
3253 return signal_levels;
3254}
3255
e3421a18
ZW
3256/* Gen6's DP voltage swing and pre-emphasis control */
3257static uint32_t
3258intel_gen6_edp_signal_levels(uint8_t train_set)
3259{
3c5a62b5
YL
3260 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3261 DP_TRAIN_PRE_EMPHASIS_MASK);
3262 switch (signal_levels) {
bd60018a
SJ
3263 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3265 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3267 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3270 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3273 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3276 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3277 default:
3c5a62b5
YL
3278 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3279 "0x%x\n", signal_levels);
3280 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3281 }
3282}
3283
1a2eb460
KP
3284/* Gen7's DP voltage swing and pre-emphasis control */
3285static uint32_t
3286intel_gen7_edp_signal_levels(uint8_t train_set)
3287{
3288 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3289 DP_TRAIN_PRE_EMPHASIS_MASK);
3290 switch (signal_levels) {
bd60018a 3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3292 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3294 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3295 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3296 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3297
bd60018a 3298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3299 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3301 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3302
bd60018a 3303 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3304 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3306 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3307
3308 default:
3309 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3310 "0x%x\n", signal_levels);
3311 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3312 }
3313}
3314
d6c0d722
PZ
3315/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3316static uint32_t
f0a3424e 3317intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3318{
d6c0d722
PZ
3319 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3320 DP_TRAIN_PRE_EMPHASIS_MASK);
3321 switch (signal_levels) {
bd60018a 3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3323 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3325 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3327 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3329 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3330
bd60018a 3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3332 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3334 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3335 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3336 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3337
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3339 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3341 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3342
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3344 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3345 default:
3346 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3347 "0x%x\n", signal_levels);
c5fe6a06 3348 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3349 }
a4fc5ed6
KP
3350}
3351
f0a3424e
PZ
3352/* Properly updates "DP" with the correct signal levels. */
3353static void
3354intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3355{
3356 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3357 enum port port = intel_dig_port->port;
f0a3424e
PZ
3358 struct drm_device *dev = intel_dig_port->base.base.dev;
3359 uint32_t signal_levels, mask;
3360 uint8_t train_set = intel_dp->train_set[0];
3361
5a9d1f1a 3362 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3363 signal_levels = intel_hsw_signal_levels(train_set);
3364 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3365 } else if (IS_CHERRYVIEW(dev)) {
3366 signal_levels = intel_chv_signal_levels(intel_dp);
3367 mask = 0;
e2fa6fba
P
3368 } else if (IS_VALLEYVIEW(dev)) {
3369 signal_levels = intel_vlv_signal_levels(intel_dp);
3370 mask = 0;
bc7d38a4 3371 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3372 signal_levels = intel_gen7_edp_signal_levels(train_set);
3373 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3374 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3375 signal_levels = intel_gen6_edp_signal_levels(train_set);
3376 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3377 } else {
3378 signal_levels = intel_gen4_signal_levels(train_set);
3379 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3380 }
3381
3382 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3383
3384 *DP = (*DP & ~mask) | signal_levels;
3385}
3386
a4fc5ed6 3387static bool
ea5b213a 3388intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3389 uint32_t *DP,
58e10eb9 3390 uint8_t dp_train_pat)
a4fc5ed6 3391{
174edf1f
PZ
3392 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3393 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3394 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3395 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3396 int ret, len;
a4fc5ed6 3397
7b13b58a 3398 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3399
70aff66c 3400 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3401 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3402
2cdfe6c8
JN
3403 buf[0] = dp_train_pat;
3404 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3405 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3406 /* don't write DP_TRAINING_LANEx_SET on disable */
3407 len = 1;
3408 } else {
3409 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3410 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3411 len = intel_dp->lane_count + 1;
47ea7542 3412 }
a4fc5ed6 3413
9d1a1031
JN
3414 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3415 buf, len);
2cdfe6c8
JN
3416
3417 return ret == len;
a4fc5ed6
KP
3418}
3419
70aff66c
JN
3420static bool
3421intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3422 uint8_t dp_train_pat)
3423{
953d22e8 3424 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3425 intel_dp_set_signal_levels(intel_dp, DP);
3426 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3427}
3428
3429static bool
3430intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3431 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3432{
3433 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3434 struct drm_device *dev = intel_dig_port->base.base.dev;
3435 struct drm_i915_private *dev_priv = dev->dev_private;
3436 int ret;
3437
3438 intel_get_adjust_train(intel_dp, link_status);
3439 intel_dp_set_signal_levels(intel_dp, DP);
3440
3441 I915_WRITE(intel_dp->output_reg, *DP);
3442 POSTING_READ(intel_dp->output_reg);
3443
9d1a1031
JN
3444 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3445 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3446
3447 return ret == intel_dp->lane_count;
3448}
3449
3ab9c637
ID
3450static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3451{
3452 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3453 struct drm_device *dev = intel_dig_port->base.base.dev;
3454 struct drm_i915_private *dev_priv = dev->dev_private;
3455 enum port port = intel_dig_port->port;
3456 uint32_t val;
3457
3458 if (!HAS_DDI(dev))
3459 return;
3460
3461 val = I915_READ(DP_TP_CTL(port));
3462 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3463 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3464 I915_WRITE(DP_TP_CTL(port), val);
3465
3466 /*
3467 * On PORT_A we can have only eDP in SST mode. There the only reason
3468 * we need to set idle transmission mode is to work around a HW issue
3469 * where we enable the pipe while not in idle link-training mode.
3470 * In this case there is requirement to wait for a minimum number of
3471 * idle patterns to be sent.
3472 */
3473 if (port == PORT_A)
3474 return;
3475
3476 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3477 1))
3478 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3479}
3480
33a34e4e 3481/* Enable corresponding port and start training pattern 1 */
c19b0669 3482void
33a34e4e 3483intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3484{
da63a9f2 3485 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3486 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3487 int i;
3488 uint8_t voltage;
cdb0e95b 3489 int voltage_tries, loop_tries;
ea5b213a 3490 uint32_t DP = intel_dp->DP;
6aba5b6c 3491 uint8_t link_config[2];
a4fc5ed6 3492
affa9354 3493 if (HAS_DDI(dev))
c19b0669
PZ
3494 intel_ddi_prepare_link_retrain(encoder);
3495
3cf2efb1 3496 /* Write the link configuration data */
6aba5b6c
JN
3497 link_config[0] = intel_dp->link_bw;
3498 link_config[1] = intel_dp->lane_count;
3499 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3500 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3501 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
bc27b7d3 3502 if (intel_dp->num_supported_rates)
a8f3ef61
SJ
3503 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3504 &intel_dp->rate_select, 1);
6aba5b6c
JN
3505
3506 link_config[0] = 0;
3507 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3508 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3509
3510 DP |= DP_PORT_EN;
1a2eb460 3511
70aff66c
JN
3512 /* clock recovery */
3513 if (!intel_dp_reset_link_train(intel_dp, &DP,
3514 DP_TRAINING_PATTERN_1 |
3515 DP_LINK_SCRAMBLING_DISABLE)) {
3516 DRM_ERROR("failed to enable link training\n");
3517 return;
3518 }
3519
a4fc5ed6 3520 voltage = 0xff;
cdb0e95b
KP
3521 voltage_tries = 0;
3522 loop_tries = 0;
a4fc5ed6 3523 for (;;) {
70aff66c 3524 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3525
a7c9655f 3526 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3527 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3528 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3529 break;
93f62dad 3530 }
a4fc5ed6 3531
01916270 3532 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3533 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3534 break;
3535 }
3536
3537 /* Check to see if we've tried the max voltage */
3538 for (i = 0; i < intel_dp->lane_count; i++)
3539 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3540 break;
3b4f819d 3541 if (i == intel_dp->lane_count) {
b06fbda3
DV
3542 ++loop_tries;
3543 if (loop_tries == 5) {
3def84b3 3544 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3545 break;
3546 }
70aff66c
JN
3547 intel_dp_reset_link_train(intel_dp, &DP,
3548 DP_TRAINING_PATTERN_1 |
3549 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3550 voltage_tries = 0;
3551 continue;
3552 }
a4fc5ed6 3553
3cf2efb1 3554 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3555 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3556 ++voltage_tries;
b06fbda3 3557 if (voltage_tries == 5) {
3def84b3 3558 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3559 break;
3560 }
3561 } else
3562 voltage_tries = 0;
3563 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3564
70aff66c
JN
3565 /* Update training set as requested by target */
3566 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3567 DRM_ERROR("failed to update link training\n");
3568 break;
3569 }
a4fc5ed6
KP
3570 }
3571
33a34e4e
JB
3572 intel_dp->DP = DP;
3573}
3574
c19b0669 3575void
33a34e4e
JB
3576intel_dp_complete_link_train(struct intel_dp *intel_dp)
3577{
33a34e4e 3578 bool channel_eq = false;
37f80975 3579 int tries, cr_tries;
33a34e4e 3580 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3581 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3582
3583 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3584 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3585 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3586
a4fc5ed6 3587 /* channel equalization */
70aff66c 3588 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3589 training_pattern |
70aff66c
JN
3590 DP_LINK_SCRAMBLING_DISABLE)) {
3591 DRM_ERROR("failed to start channel equalization\n");
3592 return;
3593 }
3594
a4fc5ed6 3595 tries = 0;
37f80975 3596 cr_tries = 0;
a4fc5ed6
KP
3597 channel_eq = false;
3598 for (;;) {
70aff66c 3599 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3600
37f80975
JB
3601 if (cr_tries > 5) {
3602 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3603 break;
3604 }
3605
a7c9655f 3606 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3607 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3608 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3609 break;
70aff66c 3610 }
a4fc5ed6 3611
37f80975 3612 /* Make sure clock is still ok */
01916270 3613 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3614 intel_dp_start_link_train(intel_dp);
70aff66c 3615 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3616 training_pattern |
70aff66c 3617 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3618 cr_tries++;
3619 continue;
3620 }
3621
1ffdff13 3622 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3623 channel_eq = true;
3624 break;
3625 }
a4fc5ed6 3626
37f80975
JB
3627 /* Try 5 times, then try clock recovery if that fails */
3628 if (tries > 5) {
37f80975 3629 intel_dp_start_link_train(intel_dp);
70aff66c 3630 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3631 training_pattern |
70aff66c 3632 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3633 tries = 0;
3634 cr_tries++;
3635 continue;
3636 }
a4fc5ed6 3637
70aff66c
JN
3638 /* Update training set as requested by target */
3639 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3640 DRM_ERROR("failed to update link training\n");
3641 break;
3642 }
3cf2efb1 3643 ++tries;
869184a6 3644 }
3cf2efb1 3645
3ab9c637
ID
3646 intel_dp_set_idle_link_train(intel_dp);
3647
3648 intel_dp->DP = DP;
3649
d6c0d722 3650 if (channel_eq)
07f42258 3651 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3652
3ab9c637
ID
3653}
3654
3655void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3656{
70aff66c 3657 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3658 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3659}
3660
3661static void
ea5b213a 3662intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3663{
da63a9f2 3664 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3665 enum port port = intel_dig_port->port;
da63a9f2 3666 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3667 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3668 uint32_t DP = intel_dp->DP;
a4fc5ed6 3669
bc76e320 3670 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3671 return;
3672
0c33d8d7 3673 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3674 return;
3675
28c97730 3676 DRM_DEBUG_KMS("\n");
32f9d658 3677
bc7d38a4 3678 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3679 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3680 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3681 } else {
aad3d14d
VS
3682 if (IS_CHERRYVIEW(dev))
3683 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3684 else
3685 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3686 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3687 }
fe255d00 3688 POSTING_READ(intel_dp->output_reg);
5eb08b69 3689
493a7081 3690 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3691 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3692 /* Hardware workaround: leaving our transcoder select
3693 * set to transcoder B while it's off will prevent the
3694 * corresponding HDMI output on transcoder A.
3695 *
3696 * Combine this with another hardware workaround:
3697 * transcoder select bit can only be cleared while the
3698 * port is enabled.
3699 */
3700 DP &= ~DP_PIPEB_SELECT;
3701 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3702 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3703 }
3704
832afda6 3705 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3706 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3707 POSTING_READ(intel_dp->output_reg);
f01eca2e 3708 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3709}
3710
26d61aad
KP
3711static bool
3712intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3713{
a031d709
RV
3714 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3715 struct drm_device *dev = dig_port->base.base.dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3717 uint8_t rev;
a031d709 3718
9d1a1031
JN
3719 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3720 sizeof(intel_dp->dpcd)) < 0)
edb39244 3721 return false; /* aux transfer failed */
92fd8fd1 3722
a8e98153 3723 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3724
edb39244
AJ
3725 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3726 return false; /* DPCD not present */
3727
2293bb5c
SK
3728 /* Check if the panel supports PSR */
3729 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3730 if (is_edp(intel_dp)) {
9d1a1031
JN
3731 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3732 intel_dp->psr_dpcd,
3733 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3734 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3735 dev_priv->psr.sink_support = true;
50003939 3736 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3737 }
50003939
JN
3738 }
3739
7809a611 3740 /* Training Pattern 3 support, both source and sink */
06ea66b6 3741 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3742 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3743 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3744 intel_dp->use_tps3 = true;
f8d8a672 3745 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3746 } else
3747 intel_dp->use_tps3 = false;
3748
fc0f8e25
SJ
3749 /* Intermediate frequency support */
3750 if (is_edp(intel_dp) &&
3751 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3752 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3753 (rev >= 0x03)) { /* eDp v1.4 or higher */
ea2d8a42
VS
3754 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3755 int i;
3756
fc0f8e25
SJ
3757 intel_dp_dpcd_read_wake(&intel_dp->aux,
3758 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3759 supported_rates,
3760 sizeof(supported_rates));
3761
3762 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3763 int val = le16_to_cpu(supported_rates[i]);
3764
3765 if (val == 0)
3766 break;
3767
3768 intel_dp->supported_rates[i] = val * 200;
3769 }
3770 intel_dp->num_supported_rates = i;
fc0f8e25 3771 }
edb39244
AJ
3772 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3773 DP_DWN_STRM_PORT_PRESENT))
3774 return true; /* native DP sink */
3775
3776 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3777 return true; /* no per-port downstream info */
3778
9d1a1031
JN
3779 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3780 intel_dp->downstream_ports,
3781 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3782 return false; /* downstream port status fetch failed */
3783
3784 return true;
92fd8fd1
KP
3785}
3786
0d198328
AJ
3787static void
3788intel_dp_probe_oui(struct intel_dp *intel_dp)
3789{
3790 u8 buf[3];
3791
3792 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3793 return;
3794
9d1a1031 3795 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3796 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3797 buf[0], buf[1], buf[2]);
3798
9d1a1031 3799 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3800 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3801 buf[0], buf[1], buf[2]);
3802}
3803
0e32b39c
DA
3804static bool
3805intel_dp_probe_mst(struct intel_dp *intel_dp)
3806{
3807 u8 buf[1];
3808
3809 if (!intel_dp->can_mst)
3810 return false;
3811
3812 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3813 return false;
3814
0e32b39c
DA
3815 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3816 if (buf[0] & DP_MST_CAP) {
3817 DRM_DEBUG_KMS("Sink is MST capable\n");
3818 intel_dp->is_mst = true;
3819 } else {
3820 DRM_DEBUG_KMS("Sink is not MST capable\n");
3821 intel_dp->is_mst = false;
3822 }
3823 }
0e32b39c
DA
3824
3825 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3826 return intel_dp->is_mst;
3827}
3828
d2e216d0
RV
3829int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3830{
3831 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3832 struct drm_device *dev = intel_dig_port->base.base.dev;
3833 struct intel_crtc *intel_crtc =
3834 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3835 u8 buf;
3836 int test_crc_count;
3837 int attempts = 6;
d2e216d0 3838
ad9dc91b 3839 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3840 return -EIO;
d2e216d0 3841
ad9dc91b 3842 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3843 return -ENOTTY;
3844
1dda5f93
RV
3845 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3846 return -EIO;
3847
9d1a1031 3848 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3849 buf | DP_TEST_SINK_START) < 0)
bda0381e 3850 return -EIO;
d2e216d0 3851
1dda5f93 3852 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3853 return -EIO;
ad9dc91b 3854 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3855
ad9dc91b 3856 do {
1dda5f93
RV
3857 if (drm_dp_dpcd_readb(&intel_dp->aux,
3858 DP_TEST_SINK_MISC, &buf) < 0)
3859 return -EIO;
ad9dc91b
RV
3860 intel_wait_for_vblank(dev, intel_crtc->pipe);
3861 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3862
3863 if (attempts == 0) {
90bd1f46
DV
3864 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3865 return -ETIMEDOUT;
ad9dc91b 3866 }
d2e216d0 3867
9d1a1031 3868 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3869 return -EIO;
d2e216d0 3870
1dda5f93
RV
3871 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3872 return -EIO;
3873 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3874 buf & ~DP_TEST_SINK_START) < 0)
3875 return -EIO;
ce31d9f4 3876
d2e216d0
RV
3877 return 0;
3878}
3879
a60f0e38
JB
3880static bool
3881intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3882{
9d1a1031
JN
3883 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3884 DP_DEVICE_SERVICE_IRQ_VECTOR,
3885 sink_irq_vector, 1) == 1;
a60f0e38
JB
3886}
3887
0e32b39c
DA
3888static bool
3889intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3890{
3891 int ret;
3892
3893 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3894 DP_SINK_COUNT_ESI,
3895 sink_irq_vector, 14);
3896 if (ret != 14)
3897 return false;
3898
3899 return true;
3900}
3901
a60f0e38
JB
3902static void
3903intel_dp_handle_test_request(struct intel_dp *intel_dp)
3904{
3905 /* NAK by default */
9d1a1031 3906 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3907}
3908
0e32b39c
DA
3909static int
3910intel_dp_check_mst_status(struct intel_dp *intel_dp)
3911{
3912 bool bret;
3913
3914 if (intel_dp->is_mst) {
3915 u8 esi[16] = { 0 };
3916 int ret = 0;
3917 int retry;
3918 bool handled;
3919 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3920go_again:
3921 if (bret == true) {
3922
3923 /* check link status - esi[10] = 0x200c */
3924 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3925 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3926 intel_dp_start_link_train(intel_dp);
3927 intel_dp_complete_link_train(intel_dp);
3928 intel_dp_stop_link_train(intel_dp);
3929 }
3930
6f34cc39 3931 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3932 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3933
3934 if (handled) {
3935 for (retry = 0; retry < 3; retry++) {
3936 int wret;
3937 wret = drm_dp_dpcd_write(&intel_dp->aux,
3938 DP_SINK_COUNT_ESI+1,
3939 &esi[1], 3);
3940 if (wret == 3) {
3941 break;
3942 }
3943 }
3944
3945 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3946 if (bret == true) {
6f34cc39 3947 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3948 goto go_again;
3949 }
3950 } else
3951 ret = 0;
3952
3953 return ret;
3954 } else {
3955 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3956 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3957 intel_dp->is_mst = false;
3958 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3959 /* send a hotplug event */
3960 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3961 }
3962 }
3963 return -EINVAL;
3964}
3965
a4fc5ed6
KP
3966/*
3967 * According to DP spec
3968 * 5.1.2:
3969 * 1. Read DPCD
3970 * 2. Configure link according to Receiver Capabilities
3971 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3972 * 4. Check link status on receipt of hot-plug interrupt
3973 */
a5146200 3974static void
ea5b213a 3975intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3976{
5b215bcf 3977 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3978 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3979 u8 sink_irq_vector;
93f62dad 3980 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3981
5b215bcf
DA
3982 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3983
da63a9f2 3984 if (!intel_encoder->connectors_active)
d2b996ac 3985 return;
59cd09e1 3986
da63a9f2 3987 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3988 return;
3989
1a125d8a
ID
3990 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3991 return;
3992
92fd8fd1 3993 /* Try to read receiver status if the link appears to be up */
93f62dad 3994 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3995 return;
3996 }
3997
92fd8fd1 3998 /* Now read the DPCD to see if it's actually running */
26d61aad 3999 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4000 return;
4001 }
4002
a60f0e38
JB
4003 /* Try to read the source of the interrupt */
4004 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4005 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4006 /* Clear interrupt source */
9d1a1031
JN
4007 drm_dp_dpcd_writeb(&intel_dp->aux,
4008 DP_DEVICE_SERVICE_IRQ_VECTOR,
4009 sink_irq_vector);
a60f0e38
JB
4010
4011 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4012 intel_dp_handle_test_request(intel_dp);
4013 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4014 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4015 }
4016
1ffdff13 4017 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4018 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4019 intel_encoder->base.name);
33a34e4e
JB
4020 intel_dp_start_link_train(intel_dp);
4021 intel_dp_complete_link_train(intel_dp);
3ab9c637 4022 intel_dp_stop_link_train(intel_dp);
33a34e4e 4023 }
a4fc5ed6 4024}
a4fc5ed6 4025
caf9ab24 4026/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4027static enum drm_connector_status
26d61aad 4028intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4029{
caf9ab24 4030 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4031 uint8_t type;
4032
4033 if (!intel_dp_get_dpcd(intel_dp))
4034 return connector_status_disconnected;
4035
4036 /* if there's no downstream port, we're done */
4037 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4038 return connector_status_connected;
caf9ab24
AJ
4039
4040 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4041 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4042 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4043 uint8_t reg;
9d1a1031
JN
4044
4045 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4046 &reg, 1) < 0)
caf9ab24 4047 return connector_status_unknown;
9d1a1031 4048
23235177
AJ
4049 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4050 : connector_status_disconnected;
caf9ab24
AJ
4051 }
4052
4053 /* If no HPD, poke DDC gently */
0b99836f 4054 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4055 return connector_status_connected;
caf9ab24
AJ
4056
4057 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4058 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4059 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4060 if (type == DP_DS_PORT_TYPE_VGA ||
4061 type == DP_DS_PORT_TYPE_NON_EDID)
4062 return connector_status_unknown;
4063 } else {
4064 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4065 DP_DWN_STRM_PORT_TYPE_MASK;
4066 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4067 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4068 return connector_status_unknown;
4069 }
caf9ab24
AJ
4070
4071 /* Anything else is out of spec, warn and ignore */
4072 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4073 return connector_status_disconnected;
71ba9000
AJ
4074}
4075
d410b56d
CW
4076static enum drm_connector_status
4077edp_detect(struct intel_dp *intel_dp)
4078{
4079 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4080 enum drm_connector_status status;
4081
4082 status = intel_panel_detect(dev);
4083 if (status == connector_status_unknown)
4084 status = connector_status_connected;
4085
4086 return status;
4087}
4088
5eb08b69 4089static enum drm_connector_status
a9756bb5 4090ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4091{
30add22d 4092 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4093 struct drm_i915_private *dev_priv = dev->dev_private;
4094 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4095
1b469639
DL
4096 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4097 return connector_status_disconnected;
4098
26d61aad 4099 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4100}
4101
2a592bec
DA
4102static int g4x_digital_port_connected(struct drm_device *dev,
4103 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4104{
a4fc5ed6 4105 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4106 uint32_t bit;
5eb08b69 4107
232a6ee9
TP
4108 if (IS_VALLEYVIEW(dev)) {
4109 switch (intel_dig_port->port) {
4110 case PORT_B:
4111 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4112 break;
4113 case PORT_C:
4114 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4115 break;
4116 case PORT_D:
4117 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4118 break;
4119 default:
2a592bec 4120 return -EINVAL;
232a6ee9
TP
4121 }
4122 } else {
4123 switch (intel_dig_port->port) {
4124 case PORT_B:
4125 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4126 break;
4127 case PORT_C:
4128 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4129 break;
4130 case PORT_D:
4131 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4132 break;
4133 default:
2a592bec 4134 return -EINVAL;
232a6ee9 4135 }
a4fc5ed6
KP
4136 }
4137
10f76a38 4138 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4139 return 0;
4140 return 1;
4141}
4142
4143static enum drm_connector_status
4144g4x_dp_detect(struct intel_dp *intel_dp)
4145{
4146 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4147 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4148 int ret;
4149
4150 /* Can't disconnect eDP, but you can close the lid... */
4151 if (is_edp(intel_dp)) {
4152 enum drm_connector_status status;
4153
4154 status = intel_panel_detect(dev);
4155 if (status == connector_status_unknown)
4156 status = connector_status_connected;
4157 return status;
4158 }
4159
4160 ret = g4x_digital_port_connected(dev, intel_dig_port);
4161 if (ret == -EINVAL)
4162 return connector_status_unknown;
4163 else if (ret == 0)
a4fc5ed6
KP
4164 return connector_status_disconnected;
4165
26d61aad 4166 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4167}
4168
8c241fef 4169static struct edid *
beb60608 4170intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4171{
beb60608 4172 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4173
9cd300e0
JN
4174 /* use cached edid if we have one */
4175 if (intel_connector->edid) {
9cd300e0
JN
4176 /* invalid edid */
4177 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4178 return NULL;
4179
55e9edeb 4180 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4181 } else
4182 return drm_get_edid(&intel_connector->base,
4183 &intel_dp->aux.ddc);
4184}
8c241fef 4185
beb60608
CW
4186static void
4187intel_dp_set_edid(struct intel_dp *intel_dp)
4188{
4189 struct intel_connector *intel_connector = intel_dp->attached_connector;
4190 struct edid *edid;
8c241fef 4191
beb60608
CW
4192 edid = intel_dp_get_edid(intel_dp);
4193 intel_connector->detect_edid = edid;
4194
4195 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4196 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4197 else
4198 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4199}
4200
beb60608
CW
4201static void
4202intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4203{
beb60608 4204 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4205
beb60608
CW
4206 kfree(intel_connector->detect_edid);
4207 intel_connector->detect_edid = NULL;
9cd300e0 4208
beb60608
CW
4209 intel_dp->has_audio = false;
4210}
d6f24d0f 4211
beb60608
CW
4212static enum intel_display_power_domain
4213intel_dp_power_get(struct intel_dp *dp)
4214{
4215 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4216 enum intel_display_power_domain power_domain;
4217
4218 power_domain = intel_display_port_power_domain(encoder);
4219 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4220
4221 return power_domain;
4222}
d6f24d0f 4223
beb60608
CW
4224static void
4225intel_dp_power_put(struct intel_dp *dp,
4226 enum intel_display_power_domain power_domain)
4227{
4228 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4229 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4230}
4231
a9756bb5
ZW
4232static enum drm_connector_status
4233intel_dp_detect(struct drm_connector *connector, bool force)
4234{
4235 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4236 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4237 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4238 struct drm_device *dev = connector->dev;
a9756bb5 4239 enum drm_connector_status status;
671dedd2 4240 enum intel_display_power_domain power_domain;
0e32b39c 4241 bool ret;
a9756bb5 4242
164c8598 4243 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4244 connector->base.id, connector->name);
beb60608 4245 intel_dp_unset_edid(intel_dp);
164c8598 4246
0e32b39c
DA
4247 if (intel_dp->is_mst) {
4248 /* MST devices are disconnected from a monitor POV */
4249 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4250 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4251 return connector_status_disconnected;
0e32b39c
DA
4252 }
4253
beb60608 4254 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4255
d410b56d
CW
4256 /* Can't disconnect eDP, but you can close the lid... */
4257 if (is_edp(intel_dp))
4258 status = edp_detect(intel_dp);
4259 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4260 status = ironlake_dp_detect(intel_dp);
4261 else
4262 status = g4x_dp_detect(intel_dp);
4263 if (status != connector_status_connected)
c8c8fb33 4264 goto out;
a9756bb5 4265
0d198328
AJ
4266 intel_dp_probe_oui(intel_dp);
4267
0e32b39c
DA
4268 ret = intel_dp_probe_mst(intel_dp);
4269 if (ret) {
4270 /* if we are in MST mode then this connector
4271 won't appear connected or have anything with EDID on it */
4272 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4273 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4274 status = connector_status_disconnected;
4275 goto out;
4276 }
4277
beb60608 4278 intel_dp_set_edid(intel_dp);
a9756bb5 4279
d63885da
PZ
4280 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4281 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4282 status = connector_status_connected;
4283
4284out:
beb60608 4285 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4286 return status;
a4fc5ed6
KP
4287}
4288
beb60608
CW
4289static void
4290intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4291{
df0e9248 4292 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4293 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4294 enum intel_display_power_domain power_domain;
a4fc5ed6 4295
beb60608
CW
4296 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4297 connector->base.id, connector->name);
4298 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4299
beb60608
CW
4300 if (connector->status != connector_status_connected)
4301 return;
671dedd2 4302
beb60608
CW
4303 power_domain = intel_dp_power_get(intel_dp);
4304
4305 intel_dp_set_edid(intel_dp);
4306
4307 intel_dp_power_put(intel_dp, power_domain);
4308
4309 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4310 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4311}
4312
4313static int intel_dp_get_modes(struct drm_connector *connector)
4314{
4315 struct intel_connector *intel_connector = to_intel_connector(connector);
4316 struct edid *edid;
4317
4318 edid = intel_connector->detect_edid;
4319 if (edid) {
4320 int ret = intel_connector_update_modes(connector, edid);
4321 if (ret)
4322 return ret;
4323 }
32f9d658 4324
f8779fda 4325 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4326 if (is_edp(intel_attached_dp(connector)) &&
4327 intel_connector->panel.fixed_mode) {
f8779fda 4328 struct drm_display_mode *mode;
beb60608
CW
4329
4330 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4331 intel_connector->panel.fixed_mode);
f8779fda 4332 if (mode) {
32f9d658
ZW
4333 drm_mode_probed_add(connector, mode);
4334 return 1;
4335 }
4336 }
beb60608 4337
32f9d658 4338 return 0;
a4fc5ed6
KP
4339}
4340
1aad7ac0
CW
4341static bool
4342intel_dp_detect_audio(struct drm_connector *connector)
4343{
1aad7ac0 4344 bool has_audio = false;
beb60608 4345 struct edid *edid;
1aad7ac0 4346
beb60608
CW
4347 edid = to_intel_connector(connector)->detect_edid;
4348 if (edid)
1aad7ac0 4349 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4350
1aad7ac0
CW
4351 return has_audio;
4352}
4353
f684960e
CW
4354static int
4355intel_dp_set_property(struct drm_connector *connector,
4356 struct drm_property *property,
4357 uint64_t val)
4358{
e953fd7b 4359 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4360 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4361 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4362 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4363 int ret;
4364
662595df 4365 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4366 if (ret)
4367 return ret;
4368
3f43c48d 4369 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4370 int i = val;
4371 bool has_audio;
4372
4373 if (i == intel_dp->force_audio)
f684960e
CW
4374 return 0;
4375
1aad7ac0 4376 intel_dp->force_audio = i;
f684960e 4377
c3e5f67b 4378 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4379 has_audio = intel_dp_detect_audio(connector);
4380 else
c3e5f67b 4381 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4382
4383 if (has_audio == intel_dp->has_audio)
f684960e
CW
4384 return 0;
4385
1aad7ac0 4386 intel_dp->has_audio = has_audio;
f684960e
CW
4387 goto done;
4388 }
4389
e953fd7b 4390 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4391 bool old_auto = intel_dp->color_range_auto;
4392 uint32_t old_range = intel_dp->color_range;
4393
55bc60db
VS
4394 switch (val) {
4395 case INTEL_BROADCAST_RGB_AUTO:
4396 intel_dp->color_range_auto = true;
4397 break;
4398 case INTEL_BROADCAST_RGB_FULL:
4399 intel_dp->color_range_auto = false;
4400 intel_dp->color_range = 0;
4401 break;
4402 case INTEL_BROADCAST_RGB_LIMITED:
4403 intel_dp->color_range_auto = false;
4404 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4405 break;
4406 default:
4407 return -EINVAL;
4408 }
ae4edb80
DV
4409
4410 if (old_auto == intel_dp->color_range_auto &&
4411 old_range == intel_dp->color_range)
4412 return 0;
4413
e953fd7b
CW
4414 goto done;
4415 }
4416
53b41837
YN
4417 if (is_edp(intel_dp) &&
4418 property == connector->dev->mode_config.scaling_mode_property) {
4419 if (val == DRM_MODE_SCALE_NONE) {
4420 DRM_DEBUG_KMS("no scaling not supported\n");
4421 return -EINVAL;
4422 }
4423
4424 if (intel_connector->panel.fitting_mode == val) {
4425 /* the eDP scaling property is not changed */
4426 return 0;
4427 }
4428 intel_connector->panel.fitting_mode = val;
4429
4430 goto done;
4431 }
4432
f684960e
CW
4433 return -EINVAL;
4434
4435done:
c0c36b94
CW
4436 if (intel_encoder->base.crtc)
4437 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4438
4439 return 0;
4440}
4441
a4fc5ed6 4442static void
73845adf 4443intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4444{
1d508706 4445 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4446
10e972d3 4447 kfree(intel_connector->detect_edid);
beb60608 4448
9cd300e0
JN
4449 if (!IS_ERR_OR_NULL(intel_connector->edid))
4450 kfree(intel_connector->edid);
4451
acd8db10
PZ
4452 /* Can't call is_edp() since the encoder may have been destroyed
4453 * already. */
4454 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4455 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4456
a4fc5ed6 4457 drm_connector_cleanup(connector);
55f78c43 4458 kfree(connector);
a4fc5ed6
KP
4459}
4460
00c09d70 4461void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4462{
da63a9f2
PZ
4463 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4464 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4465
4f71d0cb 4466 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4467 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4468 if (is_edp(intel_dp)) {
4469 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4470 /*
4471 * vdd might still be enabled do to the delayed vdd off.
4472 * Make sure vdd is actually turned off here.
4473 */
773538e8 4474 pps_lock(intel_dp);
4be73780 4475 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4476 pps_unlock(intel_dp);
4477
01527b31
CT
4478 if (intel_dp->edp_notifier.notifier_call) {
4479 unregister_reboot_notifier(&intel_dp->edp_notifier);
4480 intel_dp->edp_notifier.notifier_call = NULL;
4481 }
bd943159 4482 }
c8bd0e49 4483 drm_encoder_cleanup(encoder);
da63a9f2 4484 kfree(intel_dig_port);
24d05927
DV
4485}
4486
07f9cd0b
ID
4487static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4488{
4489 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4490
4491 if (!is_edp(intel_dp))
4492 return;
4493
951468f3
VS
4494 /*
4495 * vdd might still be enabled do to the delayed vdd off.
4496 * Make sure vdd is actually turned off here.
4497 */
afa4e53a 4498 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4499 pps_lock(intel_dp);
07f9cd0b 4500 edp_panel_vdd_off_sync(intel_dp);
773538e8 4501 pps_unlock(intel_dp);
07f9cd0b
ID
4502}
4503
49e6bc51
VS
4504static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4505{
4506 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4507 struct drm_device *dev = intel_dig_port->base.base.dev;
4508 struct drm_i915_private *dev_priv = dev->dev_private;
4509 enum intel_display_power_domain power_domain;
4510
4511 lockdep_assert_held(&dev_priv->pps_mutex);
4512
4513 if (!edp_have_panel_vdd(intel_dp))
4514 return;
4515
4516 /*
4517 * The VDD bit needs a power domain reference, so if the bit is
4518 * already enabled when we boot or resume, grab this reference and
4519 * schedule a vdd off, so we don't hold on to the reference
4520 * indefinitely.
4521 */
4522 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4523 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4524 intel_display_power_get(dev_priv, power_domain);
4525
4526 edp_panel_vdd_schedule_off(intel_dp);
4527}
4528
6d93c0c4
ID
4529static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4530{
49e6bc51
VS
4531 struct intel_dp *intel_dp;
4532
4533 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4534 return;
4535
4536 intel_dp = enc_to_intel_dp(encoder);
4537
4538 pps_lock(intel_dp);
4539
4540 /*
4541 * Read out the current power sequencer assignment,
4542 * in case the BIOS did something with it.
4543 */
4544 if (IS_VALLEYVIEW(encoder->dev))
4545 vlv_initial_power_sequencer_setup(intel_dp);
4546
4547 intel_edp_panel_vdd_sanitize(intel_dp);
4548
4549 pps_unlock(intel_dp);
6d93c0c4
ID
4550}
4551
a4fc5ed6 4552static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4553 .dpms = intel_connector_dpms,
a4fc5ed6 4554 .detect = intel_dp_detect,
beb60608 4555 .force = intel_dp_force,
a4fc5ed6 4556 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4557 .set_property = intel_dp_set_property,
2545e4a6 4558 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4559 .destroy = intel_dp_connector_destroy,
c6f95f27 4560 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4561};
4562
4563static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4564 .get_modes = intel_dp_get_modes,
4565 .mode_valid = intel_dp_mode_valid,
df0e9248 4566 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4567};
4568
a4fc5ed6 4569static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4570 .reset = intel_dp_encoder_reset,
24d05927 4571 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4572};
4573
0e32b39c 4574void
21d40d37 4575intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4576{
0e32b39c 4577 return;
c8110e52 4578}
6207937d 4579
b2c5c181 4580enum irqreturn
13cf5504
DA
4581intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4582{
4583 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4584 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4585 struct drm_device *dev = intel_dig_port->base.base.dev;
4586 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4587 enum intel_display_power_domain power_domain;
b2c5c181 4588 enum irqreturn ret = IRQ_NONE;
1c767b33 4589
0e32b39c
DA
4590 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4591 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4592
7a7f84cc
VS
4593 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4594 /*
4595 * vdd off can generate a long pulse on eDP which
4596 * would require vdd on to handle it, and thus we
4597 * would end up in an endless cycle of
4598 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4599 */
4600 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4601 port_name(intel_dig_port->port));
a8b3d52f 4602 return IRQ_HANDLED;
7a7f84cc
VS
4603 }
4604
26fbb774
VS
4605 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4606 port_name(intel_dig_port->port),
0e32b39c 4607 long_hpd ? "long" : "short");
13cf5504 4608
1c767b33
ID
4609 power_domain = intel_display_port_power_domain(intel_encoder);
4610 intel_display_power_get(dev_priv, power_domain);
4611
0e32b39c 4612 if (long_hpd) {
2a592bec
DA
4613
4614 if (HAS_PCH_SPLIT(dev)) {
4615 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4616 goto mst_fail;
4617 } else {
4618 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4619 goto mst_fail;
4620 }
0e32b39c
DA
4621
4622 if (!intel_dp_get_dpcd(intel_dp)) {
4623 goto mst_fail;
4624 }
4625
4626 intel_dp_probe_oui(intel_dp);
4627
4628 if (!intel_dp_probe_mst(intel_dp))
4629 goto mst_fail;
4630
4631 } else {
4632 if (intel_dp->is_mst) {
1c767b33 4633 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4634 goto mst_fail;
4635 }
4636
4637 if (!intel_dp->is_mst) {
4638 /*
4639 * we'll check the link status via the normal hot plug path later -
4640 * but for short hpds we should check it now
4641 */
5b215bcf 4642 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4643 intel_dp_check_link_status(intel_dp);
5b215bcf 4644 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4645 }
4646 }
b2c5c181
DV
4647
4648 ret = IRQ_HANDLED;
4649
1c767b33 4650 goto put_power;
0e32b39c
DA
4651mst_fail:
4652 /* if we were in MST mode, and device is not there get out of MST mode */
4653 if (intel_dp->is_mst) {
4654 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4655 intel_dp->is_mst = false;
4656 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4657 }
1c767b33
ID
4658put_power:
4659 intel_display_power_put(dev_priv, power_domain);
4660
4661 return ret;
13cf5504
DA
4662}
4663
e3421a18
ZW
4664/* Return which DP Port should be selected for Transcoder DP control */
4665int
0206e353 4666intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4667{
4668 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4669 struct intel_encoder *intel_encoder;
4670 struct intel_dp *intel_dp;
e3421a18 4671
fa90ecef
PZ
4672 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4673 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4674
fa90ecef
PZ
4675 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4676 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4677 return intel_dp->output_reg;
e3421a18 4678 }
ea5b213a 4679
e3421a18
ZW
4680 return -1;
4681}
4682
36e83a18 4683/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4684bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4685{
4686 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4687 union child_device_config *p_child;
36e83a18 4688 int i;
5d8a7752
VS
4689 static const short port_mapping[] = {
4690 [PORT_B] = PORT_IDPB,
4691 [PORT_C] = PORT_IDPC,
4692 [PORT_D] = PORT_IDPD,
4693 };
36e83a18 4694
3b32a35b
VS
4695 if (port == PORT_A)
4696 return true;
4697
41aa3448 4698 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4699 return false;
4700
41aa3448
RV
4701 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4702 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4703
5d8a7752 4704 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4705 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4706 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4707 return true;
4708 }
4709 return false;
4710}
4711
0e32b39c 4712void
f684960e
CW
4713intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4714{
53b41837
YN
4715 struct intel_connector *intel_connector = to_intel_connector(connector);
4716
3f43c48d 4717 intel_attach_force_audio_property(connector);
e953fd7b 4718 intel_attach_broadcast_rgb_property(connector);
55bc60db 4719 intel_dp->color_range_auto = true;
53b41837
YN
4720
4721 if (is_edp(intel_dp)) {
4722 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4723 drm_object_attach_property(
4724 &connector->base,
53b41837 4725 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4726 DRM_MODE_SCALE_ASPECT);
4727 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4728 }
f684960e
CW
4729}
4730
dada1a9f
ID
4731static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4732{
4733 intel_dp->last_power_cycle = jiffies;
4734 intel_dp->last_power_on = jiffies;
4735 intel_dp->last_backlight_off = jiffies;
4736}
4737
67a54566
DV
4738static void
4739intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4740 struct intel_dp *intel_dp)
67a54566
DV
4741{
4742 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4743 struct edp_power_seq cur, vbt, spec,
4744 *final = &intel_dp->pps_delays;
67a54566 4745 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4746 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4747
e39b999a
VS
4748 lockdep_assert_held(&dev_priv->pps_mutex);
4749
81ddbc69
VS
4750 /* already initialized? */
4751 if (final->t11_t12 != 0)
4752 return;
4753
453c5420 4754 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4755 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4756 pp_on_reg = PCH_PP_ON_DELAYS;
4757 pp_off_reg = PCH_PP_OFF_DELAYS;
4758 pp_div_reg = PCH_PP_DIVISOR;
4759 } else {
bf13e81b
JN
4760 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4761
4762 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4763 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4764 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4765 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4766 }
67a54566
DV
4767
4768 /* Workaround: Need to write PP_CONTROL with the unlock key as
4769 * the very first thing. */
453c5420 4770 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4771 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4772
453c5420
JB
4773 pp_on = I915_READ(pp_on_reg);
4774 pp_off = I915_READ(pp_off_reg);
4775 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4776
4777 /* Pull timing values out of registers */
4778 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4779 PANEL_POWER_UP_DELAY_SHIFT;
4780
4781 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4782 PANEL_LIGHT_ON_DELAY_SHIFT;
4783
4784 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4785 PANEL_LIGHT_OFF_DELAY_SHIFT;
4786
4787 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4788 PANEL_POWER_DOWN_DELAY_SHIFT;
4789
4790 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4791 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4792
4793 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4794 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4795
41aa3448 4796 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4797
4798 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4799 * our hw here, which are all in 100usec. */
4800 spec.t1_t3 = 210 * 10;
4801 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4802 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4803 spec.t10 = 500 * 10;
4804 /* This one is special and actually in units of 100ms, but zero
4805 * based in the hw (so we need to add 100 ms). But the sw vbt
4806 * table multiplies it with 1000 to make it in units of 100usec,
4807 * too. */
4808 spec.t11_t12 = (510 + 100) * 10;
4809
4810 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4811 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4812
4813 /* Use the max of the register settings and vbt. If both are
4814 * unset, fall back to the spec limits. */
36b5f425 4815#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4816 spec.field : \
4817 max(cur.field, vbt.field))
4818 assign_final(t1_t3);
4819 assign_final(t8);
4820 assign_final(t9);
4821 assign_final(t10);
4822 assign_final(t11_t12);
4823#undef assign_final
4824
36b5f425 4825#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4826 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4827 intel_dp->backlight_on_delay = get_delay(t8);
4828 intel_dp->backlight_off_delay = get_delay(t9);
4829 intel_dp->panel_power_down_delay = get_delay(t10);
4830 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4831#undef get_delay
4832
f30d26e4
JN
4833 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4834 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4835 intel_dp->panel_power_cycle_delay);
4836
4837 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4838 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4839}
4840
4841static void
4842intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4843 struct intel_dp *intel_dp)
f30d26e4
JN
4844{
4845 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4846 u32 pp_on, pp_off, pp_div, port_sel = 0;
4847 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4848 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4849 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4850 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4851
e39b999a 4852 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4853
4854 if (HAS_PCH_SPLIT(dev)) {
4855 pp_on_reg = PCH_PP_ON_DELAYS;
4856 pp_off_reg = PCH_PP_OFF_DELAYS;
4857 pp_div_reg = PCH_PP_DIVISOR;
4858 } else {
bf13e81b
JN
4859 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4860
4861 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4862 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4863 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4864 }
4865
b2f19d1a
PZ
4866 /*
4867 * And finally store the new values in the power sequencer. The
4868 * backlight delays are set to 1 because we do manual waits on them. For
4869 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4870 * we'll end up waiting for the backlight off delay twice: once when we
4871 * do the manual sleep, and once when we disable the panel and wait for
4872 * the PP_STATUS bit to become zero.
4873 */
f30d26e4 4874 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4875 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4876 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4877 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4878 /* Compute the divisor for the pp clock, simply match the Bspec
4879 * formula. */
453c5420 4880 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4881 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4882 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4883
4884 /* Haswell doesn't have any port selection bits for the panel
4885 * power sequencer any more. */
bc7d38a4 4886 if (IS_VALLEYVIEW(dev)) {
ad933b56 4887 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4888 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4889 if (port == PORT_A)
a24c144c 4890 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4891 else
a24c144c 4892 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4893 }
4894
453c5420
JB
4895 pp_on |= port_sel;
4896
4897 I915_WRITE(pp_on_reg, pp_on);
4898 I915_WRITE(pp_off_reg, pp_off);
4899 I915_WRITE(pp_div_reg, pp_div);
67a54566 4900
67a54566 4901 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4902 I915_READ(pp_on_reg),
4903 I915_READ(pp_off_reg),
4904 I915_READ(pp_div_reg));
f684960e
CW
4905}
4906
b33a2815
VK
4907/**
4908 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4909 * @dev: DRM device
4910 * @refresh_rate: RR to be programmed
4911 *
4912 * This function gets called when refresh rate (RR) has to be changed from
4913 * one frequency to another. Switches can be between high and low RR
4914 * supported by the panel or to any other RR based on media playback (in
4915 * this case, RR value needs to be passed from user space).
4916 *
4917 * The caller of this function needs to take a lock on dev_priv->drrs.
4918 */
96178eeb 4919static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4920{
4921 struct drm_i915_private *dev_priv = dev->dev_private;
4922 struct intel_encoder *encoder;
96178eeb
VK
4923 struct intel_digital_port *dig_port = NULL;
4924 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4925 struct intel_crtc_state *config = NULL;
439d7ac0 4926 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4927 u32 reg, val;
96178eeb 4928 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4929
4930 if (refresh_rate <= 0) {
4931 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4932 return;
4933 }
4934
96178eeb
VK
4935 if (intel_dp == NULL) {
4936 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4937 return;
4938 }
4939
1fcc9d1c 4940 /*
e4d59f6b
RV
4941 * FIXME: This needs proper synchronization with psr state for some
4942 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4943 */
439d7ac0 4944
96178eeb
VK
4945 dig_port = dp_to_dig_port(intel_dp);
4946 encoder = &dig_port->base;
439d7ac0
PB
4947 intel_crtc = encoder->new_crtc;
4948
4949 if (!intel_crtc) {
4950 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4951 return;
4952 }
4953
6e3c9717 4954 config = intel_crtc->config;
439d7ac0 4955
96178eeb 4956 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4957 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4958 return;
4959 }
4960
96178eeb
VK
4961 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4962 refresh_rate)
439d7ac0
PB
4963 index = DRRS_LOW_RR;
4964
96178eeb 4965 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4966 DRM_DEBUG_KMS(
4967 "DRRS requested for previously set RR...ignoring\n");
4968 return;
4969 }
4970
4971 if (!intel_crtc->active) {
4972 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4973 return;
4974 }
4975
44395bfe 4976 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4977 switch (index) {
4978 case DRRS_HIGH_RR:
4979 intel_dp_set_m_n(intel_crtc, M1_N1);
4980 break;
4981 case DRRS_LOW_RR:
4982 intel_dp_set_m_n(intel_crtc, M2_N2);
4983 break;
4984 case DRRS_MAX_RR:
4985 default:
4986 DRM_ERROR("Unsupported refreshrate type\n");
4987 }
4988 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4989 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4990 val = I915_READ(reg);
a4c30b1d 4991
439d7ac0 4992 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4993 if (IS_VALLEYVIEW(dev))
4994 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4995 else
4996 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4997 } else {
6fa7aec1
VK
4998 if (IS_VALLEYVIEW(dev))
4999 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5000 else
5001 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5002 }
5003 I915_WRITE(reg, val);
5004 }
5005
4e9ac947
VK
5006 dev_priv->drrs.refresh_rate_type = index;
5007
5008 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5009}
5010
b33a2815
VK
5011/**
5012 * intel_edp_drrs_enable - init drrs struct if supported
5013 * @intel_dp: DP struct
5014 *
5015 * Initializes frontbuffer_bits and drrs.dp
5016 */
c395578e
VK
5017void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5018{
5019 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5020 struct drm_i915_private *dev_priv = dev->dev_private;
5021 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5022 struct drm_crtc *crtc = dig_port->base.base.crtc;
5023 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5024
5025 if (!intel_crtc->config->has_drrs) {
5026 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5027 return;
5028 }
5029
5030 mutex_lock(&dev_priv->drrs.mutex);
5031 if (WARN_ON(dev_priv->drrs.dp)) {
5032 DRM_ERROR("DRRS already enabled\n");
5033 goto unlock;
5034 }
5035
5036 dev_priv->drrs.busy_frontbuffer_bits = 0;
5037
5038 dev_priv->drrs.dp = intel_dp;
5039
5040unlock:
5041 mutex_unlock(&dev_priv->drrs.mutex);
5042}
5043
b33a2815
VK
5044/**
5045 * intel_edp_drrs_disable - Disable DRRS
5046 * @intel_dp: DP struct
5047 *
5048 */
c395578e
VK
5049void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5050{
5051 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5052 struct drm_i915_private *dev_priv = dev->dev_private;
5053 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5054 struct drm_crtc *crtc = dig_port->base.base.crtc;
5055 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5056
5057 if (!intel_crtc->config->has_drrs)
5058 return;
5059
5060 mutex_lock(&dev_priv->drrs.mutex);
5061 if (!dev_priv->drrs.dp) {
5062 mutex_unlock(&dev_priv->drrs.mutex);
5063 return;
5064 }
5065
5066 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5067 intel_dp_set_drrs_state(dev_priv->dev,
5068 intel_dp->attached_connector->panel.
5069 fixed_mode->vrefresh);
5070
5071 dev_priv->drrs.dp = NULL;
5072 mutex_unlock(&dev_priv->drrs.mutex);
5073
5074 cancel_delayed_work_sync(&dev_priv->drrs.work);
5075}
5076
4e9ac947
VK
5077static void intel_edp_drrs_downclock_work(struct work_struct *work)
5078{
5079 struct drm_i915_private *dev_priv =
5080 container_of(work, typeof(*dev_priv), drrs.work.work);
5081 struct intel_dp *intel_dp;
5082
5083 mutex_lock(&dev_priv->drrs.mutex);
5084
5085 intel_dp = dev_priv->drrs.dp;
5086
5087 if (!intel_dp)
5088 goto unlock;
5089
439d7ac0 5090 /*
4e9ac947
VK
5091 * The delayed work can race with an invalidate hence we need to
5092 * recheck.
439d7ac0
PB
5093 */
5094
4e9ac947
VK
5095 if (dev_priv->drrs.busy_frontbuffer_bits)
5096 goto unlock;
439d7ac0 5097
4e9ac947
VK
5098 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5099 intel_dp_set_drrs_state(dev_priv->dev,
5100 intel_dp->attached_connector->panel.
5101 downclock_mode->vrefresh);
439d7ac0 5102
4e9ac947 5103unlock:
439d7ac0 5104
4e9ac947 5105 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5106}
5107
b33a2815
VK
5108/**
5109 * intel_edp_drrs_invalidate - Invalidate DRRS
5110 * @dev: DRM device
5111 * @frontbuffer_bits: frontbuffer plane tracking bits
5112 *
5113 * When there is a disturbance on screen (due to cursor movement/time
5114 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5115 * high RR.
5116 *
5117 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5118 */
a93fad0f
VK
5119void intel_edp_drrs_invalidate(struct drm_device *dev,
5120 unsigned frontbuffer_bits)
5121{
5122 struct drm_i915_private *dev_priv = dev->dev_private;
5123 struct drm_crtc *crtc;
5124 enum pipe pipe;
5125
5126 if (!dev_priv->drrs.dp)
5127 return;
5128
3954e733
R
5129 cancel_delayed_work_sync(&dev_priv->drrs.work);
5130
a93fad0f
VK
5131 mutex_lock(&dev_priv->drrs.mutex);
5132 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5133 pipe = to_intel_crtc(crtc)->pipe;
5134
5135 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5136 intel_dp_set_drrs_state(dev_priv->dev,
5137 dev_priv->drrs.dp->attached_connector->panel.
5138 fixed_mode->vrefresh);
5139 }
5140
5141 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5142
5143 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5144 mutex_unlock(&dev_priv->drrs.mutex);
5145}
5146
b33a2815
VK
5147/**
5148 * intel_edp_drrs_flush - Flush DRRS
5149 * @dev: DRM device
5150 * @frontbuffer_bits: frontbuffer plane tracking bits
5151 *
5152 * When there is no movement on screen, DRRS work can be scheduled.
5153 * This DRRS work is responsible for setting relevant registers after a
5154 * timeout of 1 second.
5155 *
5156 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5157 */
a93fad0f
VK
5158void intel_edp_drrs_flush(struct drm_device *dev,
5159 unsigned frontbuffer_bits)
5160{
5161 struct drm_i915_private *dev_priv = dev->dev_private;
5162 struct drm_crtc *crtc;
5163 enum pipe pipe;
5164
5165 if (!dev_priv->drrs.dp)
5166 return;
5167
3954e733
R
5168 cancel_delayed_work_sync(&dev_priv->drrs.work);
5169
a93fad0f
VK
5170 mutex_lock(&dev_priv->drrs.mutex);
5171 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5172 pipe = to_intel_crtc(crtc)->pipe;
5173 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5174
a93fad0f
VK
5175 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5176 !dev_priv->drrs.busy_frontbuffer_bits)
5177 schedule_delayed_work(&dev_priv->drrs.work,
5178 msecs_to_jiffies(1000));
5179 mutex_unlock(&dev_priv->drrs.mutex);
5180}
5181
b33a2815
VK
5182/**
5183 * DOC: Display Refresh Rate Switching (DRRS)
5184 *
5185 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5186 * which enables swtching between low and high refresh rates,
5187 * dynamically, based on the usage scenario. This feature is applicable
5188 * for internal panels.
5189 *
5190 * Indication that the panel supports DRRS is given by the panel EDID, which
5191 * would list multiple refresh rates for one resolution.
5192 *
5193 * DRRS is of 2 types - static and seamless.
5194 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5195 * (may appear as a blink on screen) and is used in dock-undock scenario.
5196 * Seamless DRRS involves changing RR without any visual effect to the user
5197 * and can be used during normal system usage. This is done by programming
5198 * certain registers.
5199 *
5200 * Support for static/seamless DRRS may be indicated in the VBT based on
5201 * inputs from the panel spec.
5202 *
5203 * DRRS saves power by switching to low RR based on usage scenarios.
5204 *
5205 * eDP DRRS:-
5206 * The implementation is based on frontbuffer tracking implementation.
5207 * When there is a disturbance on the screen triggered by user activity or a
5208 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5209 * When there is no movement on screen, after a timeout of 1 second, a switch
5210 * to low RR is made.
5211 * For integration with frontbuffer tracking code,
5212 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5213 *
5214 * DRRS can be further extended to support other internal panels and also
5215 * the scenario of video playback wherein RR is set based on the rate
5216 * requested by userspace.
5217 */
5218
5219/**
5220 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5221 * @intel_connector: eDP connector
5222 * @fixed_mode: preferred mode of panel
5223 *
5224 * This function is called only once at driver load to initialize basic
5225 * DRRS stuff.
5226 *
5227 * Returns:
5228 * Downclock mode if panel supports it, else return NULL.
5229 * DRRS support is determined by the presence of downclock mode (apart
5230 * from VBT setting).
5231 */
4f9db5b5 5232static struct drm_display_mode *
96178eeb
VK
5233intel_dp_drrs_init(struct intel_connector *intel_connector,
5234 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5235{
5236 struct drm_connector *connector = &intel_connector->base;
96178eeb 5237 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5238 struct drm_i915_private *dev_priv = dev->dev_private;
5239 struct drm_display_mode *downclock_mode = NULL;
5240
5241 if (INTEL_INFO(dev)->gen <= 6) {
5242 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5243 return NULL;
5244 }
5245
5246 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5247 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5248 return NULL;
5249 }
5250
5251 downclock_mode = intel_find_panel_downclock
5252 (dev, fixed_mode, connector);
5253
5254 if (!downclock_mode) {
a1d26342 5255 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5256 return NULL;
5257 }
5258
4e9ac947
VK
5259 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5260
96178eeb 5261 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5262
96178eeb 5263 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5264
96178eeb 5265 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5266 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5267 return downclock_mode;
5268}
5269
ed92f0b2 5270static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5271 struct intel_connector *intel_connector)
ed92f0b2
PZ
5272{
5273 struct drm_connector *connector = &intel_connector->base;
5274 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5275 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5276 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5277 struct drm_i915_private *dev_priv = dev->dev_private;
5278 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5279 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5280 bool has_dpcd;
5281 struct drm_display_mode *scan;
5282 struct edid *edid;
6517d273 5283 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5284
96178eeb 5285 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5286
ed92f0b2
PZ
5287 if (!is_edp(intel_dp))
5288 return true;
5289
49e6bc51
VS
5290 pps_lock(intel_dp);
5291 intel_edp_panel_vdd_sanitize(intel_dp);
5292 pps_unlock(intel_dp);
63635217 5293
ed92f0b2 5294 /* Cache DPCD and EDID for edp. */
ed92f0b2 5295 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5296
5297 if (has_dpcd) {
5298 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5299 dev_priv->no_aux_handshake =
5300 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5301 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5302 } else {
5303 /* if this fails, presume the device is a ghost */
5304 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5305 return false;
5306 }
5307
5308 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5309 pps_lock(intel_dp);
36b5f425 5310 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5311 pps_unlock(intel_dp);
ed92f0b2 5312
060c8778 5313 mutex_lock(&dev->mode_config.mutex);
0b99836f 5314 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5315 if (edid) {
5316 if (drm_add_edid_modes(connector, edid)) {
5317 drm_mode_connector_update_edid_property(connector,
5318 edid);
5319 drm_edid_to_eld(connector, edid);
5320 } else {
5321 kfree(edid);
5322 edid = ERR_PTR(-EINVAL);
5323 }
5324 } else {
5325 edid = ERR_PTR(-ENOENT);
5326 }
5327 intel_connector->edid = edid;
5328
5329 /* prefer fixed mode from EDID if available */
5330 list_for_each_entry(scan, &connector->probed_modes, head) {
5331 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5332 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5333 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5334 intel_connector, fixed_mode);
ed92f0b2
PZ
5335 break;
5336 }
5337 }
5338
5339 /* fallback to VBT if available for eDP */
5340 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5341 fixed_mode = drm_mode_duplicate(dev,
5342 dev_priv->vbt.lfp_lvds_vbt_mode);
5343 if (fixed_mode)
5344 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5345 }
060c8778 5346 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5347
01527b31
CT
5348 if (IS_VALLEYVIEW(dev)) {
5349 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5350 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5351
5352 /*
5353 * Figure out the current pipe for the initial backlight setup.
5354 * If the current pipe isn't valid, try the PPS pipe, and if that
5355 * fails just assume pipe A.
5356 */
5357 if (IS_CHERRYVIEW(dev))
5358 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5359 else
5360 pipe = PORT_TO_PIPE(intel_dp->DP);
5361
5362 if (pipe != PIPE_A && pipe != PIPE_B)
5363 pipe = intel_dp->pps_pipe;
5364
5365 if (pipe != PIPE_A && pipe != PIPE_B)
5366 pipe = PIPE_A;
5367
5368 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5369 pipe_name(pipe));
01527b31
CT
5370 }
5371
4f9db5b5 5372 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5373 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5374 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5375
5376 return true;
5377}
5378
16c25533 5379bool
f0fec3f2
PZ
5380intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5381 struct intel_connector *intel_connector)
a4fc5ed6 5382{
f0fec3f2
PZ
5383 struct drm_connector *connector = &intel_connector->base;
5384 struct intel_dp *intel_dp = &intel_dig_port->dp;
5385 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5386 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5387 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5388 enum port port = intel_dig_port->port;
0b99836f 5389 int type;
a4fc5ed6 5390
a4a5d2f8
VS
5391 intel_dp->pps_pipe = INVALID_PIPE;
5392
ec5b01dd 5393 /* intel_dp vfuncs */
b6b5e383
DL
5394 if (INTEL_INFO(dev)->gen >= 9)
5395 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5396 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5397 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5398 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5399 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5400 else if (HAS_PCH_SPLIT(dev))
5401 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5402 else
5403 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5404
b9ca5fad
DL
5405 if (INTEL_INFO(dev)->gen >= 9)
5406 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5407 else
5408 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5409
0767935e
DV
5410 /* Preserve the current hw state. */
5411 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5412 intel_dp->attached_connector = intel_connector;
3d3dc149 5413
3b32a35b 5414 if (intel_dp_is_edp(dev, port))
b329530c 5415 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5416 else
5417 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5418
f7d24902
ID
5419 /*
5420 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5421 * for DP the encoder type can be set by the caller to
5422 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5423 */
5424 if (type == DRM_MODE_CONNECTOR_eDP)
5425 intel_encoder->type = INTEL_OUTPUT_EDP;
5426
c17ed5b5
VS
5427 /* eDP only on port B and/or C on vlv/chv */
5428 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5429 port != PORT_B && port != PORT_C))
5430 return false;
5431
e7281eab
ID
5432 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5433 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5434 port_name(port));
5435
b329530c 5436 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5437 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5438
a4fc5ed6
KP
5439 connector->interlace_allowed = true;
5440 connector->doublescan_allowed = 0;
5441
f0fec3f2 5442 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5443 edp_panel_vdd_work);
a4fc5ed6 5444
df0e9248 5445 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5446 drm_connector_register(connector);
a4fc5ed6 5447
affa9354 5448 if (HAS_DDI(dev))
bcbc889b
PZ
5449 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5450 else
5451 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5452 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5453
0b99836f 5454 /* Set up the hotplug pin. */
ab9d7c30
PZ
5455 switch (port) {
5456 case PORT_A:
1d843f9d 5457 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5458 break;
5459 case PORT_B:
1d843f9d 5460 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5461 break;
5462 case PORT_C:
1d843f9d 5463 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5464 break;
5465 case PORT_D:
1d843f9d 5466 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5467 break;
5468 default:
ad1c0b19 5469 BUG();
5eb08b69
ZW
5470 }
5471
dada1a9f 5472 if (is_edp(intel_dp)) {
773538e8 5473 pps_lock(intel_dp);
1e74a324
VS
5474 intel_dp_init_panel_power_timestamps(intel_dp);
5475 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5476 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5477 else
36b5f425 5478 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5479 pps_unlock(intel_dp);
dada1a9f 5480 }
0095e6dc 5481
9d1a1031 5482 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5483
0e32b39c 5484 /* init MST on ports that can support it */
c86ea3d0 5485 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5486 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5487 intel_dp_mst_encoder_init(intel_dig_port,
5488 intel_connector->base.base.id);
0e32b39c
DA
5489 }
5490 }
5491
36b5f425 5492 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5493 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5494 if (is_edp(intel_dp)) {
5495 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5496 /*
5497 * vdd might still be enabled do to the delayed vdd off.
5498 * Make sure vdd is actually turned off here.
5499 */
773538e8 5500 pps_lock(intel_dp);
4be73780 5501 edp_panel_vdd_off_sync(intel_dp);
773538e8 5502 pps_unlock(intel_dp);
15b1d171 5503 }
34ea3d38 5504 drm_connector_unregister(connector);
b2f246a8 5505 drm_connector_cleanup(connector);
16c25533 5506 return false;
b2f246a8 5507 }
32f9d658 5508
f684960e
CW
5509 intel_dp_add_properties(intel_dp, connector);
5510
a4fc5ed6
KP
5511 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5512 * 0xd. Failure to do so will result in spurious interrupts being
5513 * generated on the port when a cable is not attached.
5514 */
5515 if (IS_G4X(dev) && !IS_GM45(dev)) {
5516 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5517 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5518 }
16c25533
PZ
5519
5520 return true;
a4fc5ed6 5521}
f0fec3f2
PZ
5522
5523void
5524intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5525{
13cf5504 5526 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5527 struct intel_digital_port *intel_dig_port;
5528 struct intel_encoder *intel_encoder;
5529 struct drm_encoder *encoder;
5530 struct intel_connector *intel_connector;
5531
b14c5679 5532 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5533 if (!intel_dig_port)
5534 return;
5535
b14c5679 5536 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5537 if (!intel_connector) {
5538 kfree(intel_dig_port);
5539 return;
5540 }
5541
5542 intel_encoder = &intel_dig_port->base;
5543 encoder = &intel_encoder->base;
5544
5545 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5546 DRM_MODE_ENCODER_TMDS);
5547
5bfe2ac0 5548 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5549 intel_encoder->disable = intel_disable_dp;
00c09d70 5550 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5551 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5552 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5553 if (IS_CHERRYVIEW(dev)) {
9197c88b 5554 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5555 intel_encoder->pre_enable = chv_pre_enable_dp;
5556 intel_encoder->enable = vlv_enable_dp;
580d3811 5557 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5558 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5559 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5560 intel_encoder->pre_enable = vlv_pre_enable_dp;
5561 intel_encoder->enable = vlv_enable_dp;
49277c31 5562 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5563 } else {
ecff4f3b
JN
5564 intel_encoder->pre_enable = g4x_pre_enable_dp;
5565 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5566 if (INTEL_INFO(dev)->gen >= 5)
5567 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5568 }
f0fec3f2 5569
174edf1f 5570 intel_dig_port->port = port;
f0fec3f2
PZ
5571 intel_dig_port->dp.output_reg = output_reg;
5572
00c09d70 5573 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5574 if (IS_CHERRYVIEW(dev)) {
5575 if (port == PORT_D)
5576 intel_encoder->crtc_mask = 1 << 2;
5577 else
5578 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5579 } else {
5580 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5581 }
bc079e8b 5582 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5583 intel_encoder->hot_plug = intel_dp_hot_plug;
5584
13cf5504
DA
5585 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5586 dev_priv->hpd_irq_port[port] = intel_dig_port;
5587
15b1d171
PZ
5588 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5589 drm_encoder_cleanup(encoder);
5590 kfree(intel_dig_port);
b2f246a8 5591 kfree(intel_connector);
15b1d171 5592 }
f0fec3f2 5593}
0e32b39c
DA
5594
5595void intel_dp_mst_suspend(struct drm_device *dev)
5596{
5597 struct drm_i915_private *dev_priv = dev->dev_private;
5598 int i;
5599
5600 /* disable MST */
5601 for (i = 0; i < I915_MAX_PORTS; i++) {
5602 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5603 if (!intel_dig_port)
5604 continue;
5605
5606 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5607 if (!intel_dig_port->dp.can_mst)
5608 continue;
5609 if (intel_dig_port->dp.is_mst)
5610 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5611 }
5612 }
5613}
5614
5615void intel_dp_mst_resume(struct drm_device *dev)
5616{
5617 struct drm_i915_private *dev_priv = dev->dev_private;
5618 int i;
5619
5620 for (i = 0; i < I915_MAX_PORTS; i++) {
5621 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5622 if (!intel_dig_port)
5623 continue;
5624 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5625 int ret;
5626
5627 if (!intel_dig_port->dp.can_mst)
5628 continue;
5629
5630 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5631 if (ret != 0) {
5632 intel_dp_check_mst_status(&intel_dig_port->dp);
5633 }
5634 }
5635 }
5636}