git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/intel_dp.c
drm/i915: Add eDP intermediate frequencies for CHV
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
 71 * CHV supports eDP 1.4, which allows more link rates.
 72 * Only the fixed rates are listed below; variable rates are excluded.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
 76 * CHV requires programming a fractional divider for m2.
 77 * m2 is stored in fixed-point format using the formula below:
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
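/*
 * Illustrative note (not from the original source): a quick sanity check of
 * the fixed-point encoding described above, for the 1.62 GHz entry. With
 * m2_int = 32 and m2_fraction = 1677722:
 *
 *   (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a
 *
 * which matches the .m2 value programmed in chv_dpll[0].
 */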
a8f3ef61 87/* Skylake supports the following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
fe51bfb9
VS
90static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
f4896f15 93static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 94
cfcb0fc9
JB
95/**
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
98 *
 99 * Returns true if a CPU or PCH DP output is attached to an eDP panel,
 100 * and false otherwise.
101 */
102static bool is_edp(struct intel_dp *intel_dp)
103{
da63a9f2
PZ
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
107}
108
68b4d824 109static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 110{
68b4d824
ID
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
114}
115
df0e9248
CW
116static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117{
fa90ecef 118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
119}
120
ea5b213a 121static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 122static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 123static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 124static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
125static void vlv_steal_power_sequencer(struct drm_device *dev,
126 enum pipe pipe);
a4fc5ed6 127
ed4e9c1d
VS
128static int
129intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 130{
7183dc29 131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
132
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_2_7:
1db10e28 136 case DP_LINK_BW_5_4:
d4eead50 137 break;
a4fc5ed6 138 default:
d4eead50
ID
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140 max_link_bw);
a4fc5ed6
KP
141 max_link_bw = DP_LINK_BW_1_62;
142 break;
143 }
144 return max_link_bw;
145}
146
eeb6324d
PZ
147static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148{
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
152
153 source_max = 4;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156 source_max = 2;
157
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160 return min(source_max, sink_max);
161}
162
cd9dde44
AJ
163/*
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
166 *
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168 *
169 * 270000 * 1 * 8 / 10 == 216000
170 *
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
175 *
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
178 */
179
a4fc5ed6 180static int
c898261c 181intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 182{
cd9dde44 183 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
184}
185
fe27d53e
DA
186static int
187intel_dp_max_data_rate(int max_link_clock, int max_lanes)
188{
189 return (max_link_clock * max_lanes * 8) / 10;
190}
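/*
 * Illustrative note (not from the original source): continuing the worked
 * example from the comment above, a single 2.7 GHz lane gives
 *
 *   intel_dp_max_data_rate(270000, 1) == 270000 * 1 * 8 / 10 == 216000
 *
 * while the 1680x1050R mode at 18bpp needs
 *
 *   intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 *
 * so 214200 <= 216000 and the mode just fits in that configuration.
 */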
191
c19de8eb 192static enum drm_mode_status
a4fc5ed6
KP
193intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195{
df0e9248 196 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 201
dd06f90e
JN
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
204 return MODE_PANEL;
205
dd06f90e 206 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 207 return MODE_PANEL;
03afc4a2
DV
208
209 target_clock = fixed_mode->clock;
7de56f43
ZY
210 }
211
50fec21a 212 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 213 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
214
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
217
218 if (mode_rate > max_rate)
c4867936 219 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
220
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
223
0af78a2b
DV
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
226
a4fc5ed6
KP
227 return MODE_OK;
228}
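/*
 * Illustrative note (not from the original source): mode_valid checks the
 * bandwidth at 18bpp (6 bpc), the lowest value intel_dp_compute_config()
 * will fall back to, so a mode is only rejected here if it cannot fit even
 * in the most frugal configuration.
 */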
229
a4f1289e 230uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
231{
232 int i;
233 uint32_t v = 0;
234
235 if (src_bytes > 4)
236 src_bytes = 4;
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239 return v;
240}
241
c2af70e2 242static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
243{
244 int i;
245 if (dst_bytes > 4)
246 dst_bytes = 4;
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
249}
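/*
 * Illustrative note (not from the original source): the two helpers above
 * marshal up to 4 bytes into/out of a big-endian 32-bit AUX data register.
 * For example:
 *
 *   intel_dp_pack_aux((uint8_t[]){ 0x12, 0x34 }, 2) == 0x12340000
 *
 * and intel_dp_unpack_aux(0x12340000, dst, 2) stores 0x12, 0x34 into dst.
 */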
250
fb0f8fbf
KP
251/* hrawclock is 1/4 the FSB frequency */
252static int
253intel_hrawclk(struct drm_device *dev)
254{
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 uint32_t clkcfg;
257
9473c8f4
VP
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
260 return 200;
261
fb0f8fbf
KP
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
264 case CLKCFG_FSB_400:
265 return 100;
266 case CLKCFG_FSB_533:
267 return 133;
268 case CLKCFG_FSB_667:
269 return 166;
270 case CLKCFG_FSB_800:
271 return 200;
272 case CLKCFG_FSB_1067:
273 return 266;
274 case CLKCFG_FSB_1333:
275 return 333;
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
279 return 400;
280 default:
281 return 133;
282 }
283}
284
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b
JN
288static void
289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 290 struct intel_dp *intel_dp);
bf13e81b 291
773538e8
VS
292static void pps_lock(struct intel_dp *intel_dp)
293{
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
299
300 /*
301 * See vlv_power_sequencer_reset() why we need
302 * a power domain reference here.
303 */
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
306
307 mutex_lock(&dev_priv->pps_mutex);
308}
309
310static void pps_unlock(struct intel_dp *intel_dp)
311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
317
318 mutex_unlock(&dev_priv->pps_mutex);
319
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
322}
323
961a0db0
VS
324static void
325vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326{
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 331 bool pll_enabled;
961a0db0
VS
332 uint32_t DP;
333
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
337 return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
d288f65f
VS
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357 /*
358 * The DPLL for the pipe must be enabled for this to work.
 359 * So enable it temporarily if it's not already enabled.
360 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
961a0db0
VS
365 /*
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
 368 * to make this power sequencer lock onto the port.
369 * Otherwise even VDD force bit won't work.
370 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
961a0db0
VS
382}
383
bf13e81b
JN
384static enum pipe
385vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
386{
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 392 enum pipe pipe;
bf13e81b 393
e39b999a 394 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 395
a8c3344e
VS
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
a4a5d2f8
VS
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
401
402 /*
 403 * We don't have a power sequencer currently.
404 * Pick one that's not used by other ports.
405 */
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
407 base.head) {
408 struct intel_dp *tmp;
409
410 if (encoder->type != INTEL_OUTPUT_EDP)
411 continue;
412
413 tmp = enc_to_intel_dp(&encoder->base);
414
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
417 }
418
419 /*
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
422 */
423 if (WARN_ON(pipes == 0))
a8c3344e
VS
424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
a4a5d2f8 427
a8c3344e
VS
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
430
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
434
435 /* init power sequencer on this pipe and port */
36b5f425
VS
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 438
961a0db0
VS
439 /*
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
444
445 return intel_dp->pps_pipe;
446}
447
6491ab27
VS
448typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
449 enum pipe pipe);
450
451static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
452 enum pipe pipe)
453{
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
455}
456
457static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
461}
462
463static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return true;
467}
bf13e81b 468
a4a5d2f8 469static enum pipe
6491ab27
VS
470vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471 enum port port,
472 vlv_pipe_check pipe_check)
a4a5d2f8
VS
473{
474 enum pipe pipe;
bf13e81b 475
bf13e81b
JN
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
479
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481 continue;
482
6491ab27
VS
483 if (!pipe_check(dev_priv, pipe))
484 continue;
485
a4a5d2f8 486 return pipe;
bf13e81b
JN
487 }
488
a4a5d2f8
VS
489 return INVALID_PIPE;
490}
491
492static void
493vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
494{
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
498 enum port port = intel_dig_port->port;
499
500 lockdep_assert_held(&dev_priv->pps_mutex);
501
502 /* try to find a pipe with this port selected */
6491ab27
VS
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
505 vlv_pipe_has_pp_on);
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_any);
a4a5d2f8
VS
514
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
518 port_name(port));
519 return;
bf13e81b
JN
520 }
521
a4a5d2f8
VS
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
524
36b5f425
VS
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
527}
528
773538e8
VS
529void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
530{
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
533
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
535 return;
536
537 /*
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
 543 * pps_{lock,unlock}() do these steps in the correct order, so they
 544 * should always be used.
545 */
546
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
549
550 if (encoder->type != INTEL_OUTPUT_EDP)
551 continue;
552
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
555 }
bf13e81b
JN
556}
557
558static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559{
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
564 else
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566}
567
568static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569{
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
574 else
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576}
577
01527b31
CT
 578/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
 579 This function is only applicable when the panel PM state is not tracked. */
580static int edp_notify_handler(struct notifier_block *this, unsigned long code,
581 void *unused)
582{
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
584 edp_notifier);
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 u32 pp_div;
588 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
589
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
591 return 0;
592
773538e8 593 pps_lock(intel_dp);
e39b999a 594
01527b31 595 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
597
01527b31
CT
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
602
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
607 }
608
773538e8 609 pps_unlock(intel_dp);
e39b999a 610
01527b31
CT
611 return 0;
612}
613
4be73780 614static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 615{
30add22d 616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
e39b999a
VS
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
9a42356b
VS
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
bf13e81b 625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
626}
627
4be73780 628static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 629{
30add22d 630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
631 struct drm_i915_private *dev_priv = dev->dev_private;
632
e39b999a
VS
633 lockdep_assert_held(&dev_priv->pps_mutex);
634
9a42356b
VS
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
773538e8 639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
640}
641
9b984dae
KP
642static void
643intel_dp_check_edp(struct intel_dp *intel_dp)
644{
30add22d 645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 646 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 647
9b984dae
KP
648 if (!is_edp(intel_dp))
649 return;
453c5420 650
4be73780 651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
656 }
657}
658
9ee32fea
DV
659static uint32_t
660intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
661{
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
666 uint32_t status;
667 bool done;
668
ef04f00d 669#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 670 if (has_aux_irq)
b18ac466 671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 672 msecs_to_jiffies_timeout(10));
9ee32fea
DV
673 else
674 done = wait_for_atomic(C, 10) == 0;
675 if (!done)
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
677 has_aux_irq);
678#undef C
679
680 return status;
681}
682
ec5b01dd 683static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 684{
174edf1f
PZ
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 687
ec5b01dd
DL
688 /*
 689 * The clock divider is based on the hrawclk, and the AUX clock should
 690 * run at about 2MHz. So take the hrawclk value, divide by 2, and use that.
a4fc5ed6 691 */
ec5b01dd
DL
692 return index ? 0 : intel_hrawclk(dev) / 2;
693}
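/*
 * Illustrative note (not from the original source): with the FSB at 800 MHz,
 * intel_hrawclk() returns 200, so the divider above is 200 / 2 = 100, i.e.
 * the AUX bit clock is derived by dividing the raw clock down to roughly
 * the 2 MHz mentioned in the comment.
 */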
694
695static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696{
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
699
700 if (index)
701 return 0;
702
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 706 else
b84a1cf8 707 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
708 } else {
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
710 }
711}
712
713static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
714{
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
718
719 if (intel_dig_port->port == PORT_A) {
720 if (index)
721 return 0;
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
bc86625a
CW
725 switch (index) {
726 case 0: return 63;
727 case 1: return 72;
728 default: return 0;
729 }
ec5b01dd 730 } else {
bc86625a 731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 732 }
b84a1cf8
RV
733}
734
ec5b01dd
DL
735static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
736{
737 return index ? 0 : 100;
738}
739
b6b5e383
DL
740static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741{
742 /*
743 * SKL doesn't need us to program the AUX clock divider (Hardware will
744 * derive the clock from CDCLK automatically). We still implement the
 745 * get_aux_clock_divider vfunc to plug into the existing code.
746 */
747 return index ? 0 : 1;
748}
749
5ed12a19
DL
750static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
751 bool has_aux_irq,
752 int send_bytes,
753 uint32_t aux_clock_divider)
754{
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
758
759 if (IS_GEN6(dev))
760 precharge = 3;
761 else
762 precharge = 5;
763
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
766 else
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
768
769 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 770 DP_AUX_CH_CTL_DONE |
5ed12a19 771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 773 timeout |
788d4433 774 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
778}
779
b9ca5fad
DL
780static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
781 bool has_aux_irq,
782 int send_bytes,
783 uint32_t unused)
784{
785 return DP_AUX_CH_CTL_SEND_BUSY |
786 DP_AUX_CH_CTL_DONE |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
793}
794
b84a1cf8
RV
795static int
796intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 797 const uint8_t *send, int send_bytes,
b84a1cf8
RV
798 uint8_t *recv, int recv_size)
799{
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
bc86625a 805 uint32_t aux_clock_divider;
b84a1cf8
RV
806 int i, ret, recv_bytes;
807 uint32_t status;
5ed12a19 808 int try, clock = 0;
4e6b788c 809 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
810 bool vdd;
811
773538e8 812 pps_lock(intel_dp);
e39b999a 813
72c3500a
VS
814 /*
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled and it's up to upper layers
 817 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
818 * ourselves.
819 */
1e0560e0 820 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
821
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
824 * deep sleep states.
825 */
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
827
828 intel_dp_check_edp(intel_dp);
5eb08b69 829
c67a470b
PZ
830 intel_aux_display_runtime_get(dev_priv);
831
11bee43e
JB
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
ef04f00d 834 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
836 break;
837 msleep(1);
838 }
839
840 if (try == 3) {
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
842 I915_READ(ch_ctl));
9ee32fea
DV
843 ret = -EBUSY;
844 goto out;
4f7f7b7e
CW
845 }
846
46a5ae9f
PZ
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
849 ret = -E2BIG;
850 goto out;
851 }
852
ec5b01dd 853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
855 has_aux_irq,
856 send_bytes,
857 aux_clock_divider);
5ed12a19 858
bc86625a
CW
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
a4f1289e
RV
864 intel_dp_pack_aux(send + i,
865 send_bytes - i));
bc86625a
CW
866
867 /* Send the command and wait for it to complete */
5ed12a19 868 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
869
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
871
872 /* Clear done status and any errors */
873 I915_WRITE(ch_ctl,
874 status |
875 DP_AUX_CH_CTL_DONE |
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
878
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
881 continue;
882 if (status & DP_AUX_CH_CTL_DONE)
883 break;
884 }
4f7f7b7e 885 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
886 break;
887 }
888
a4fc5ed6 889 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 890 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
891 ret = -EBUSY;
892 goto out;
a4fc5ed6
KP
893 }
894
895 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected
897 */
a5b3da54 898 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 899 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
900 ret = -EIO;
901 goto out;
a5b3da54 902 }
1ae8c0a5
KP
903
904 /* Timeouts occur when the device isn't connected, so they're
905 * "normal" -- don't fill the kernel log with these */
a5b3da54 906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 907 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
908 ret = -ETIMEDOUT;
909 goto out;
a4fc5ed6
KP
910 }
911
912 /* Unload any bytes sent back from the other side */
913 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
914 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
915 if (recv_bytes > recv_size)
916 recv_bytes = recv_size;
0206e353 917
4f7f7b7e 918 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
919 intel_dp_unpack_aux(I915_READ(ch_data + i),
920 recv + i, recv_bytes - i);
a4fc5ed6 921
9ee32fea
DV
922 ret = recv_bytes;
923out:
924 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 925 intel_aux_display_runtime_put(dev_priv);
9ee32fea 926
884f19e9
JN
927 if (vdd)
928 edp_panel_vdd_off(intel_dp, false);
929
773538e8 930 pps_unlock(intel_dp);
e39b999a 931
9ee32fea 932 return ret;
a4fc5ed6
KP
933}
934
a6c8aff0
JN
935#define BARE_ADDRESS_SIZE 3
936#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
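/*
 * Illustrative note (not from the original source): the AUX header built in
 * intel_dp_aux_transfer() below consists of the 4-bit request, the address
 * bytes and a length byte. BARE_ADDRESS_SIZE covers only the request/address
 * bytes (txbuf[0..2]), which is all that is sent for a zero-length,
 * "address-only" message; HEADER_SIZE adds the length byte (txbuf[3]).
 */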
9d1a1031
JN
937static ssize_t
938intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 939{
9d1a1031
JN
940 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
941 uint8_t txbuf[20], rxbuf[20];
942 size_t txsize, rxsize;
a4fc5ed6 943 int ret;
a4fc5ed6 944
9d1a1031
JN
945 txbuf[0] = msg->request << 4;
946 txbuf[1] = msg->address >> 8;
947 txbuf[2] = msg->address & 0xff;
948 txbuf[3] = msg->size - 1;
46a5ae9f 949
9d1a1031
JN
950 switch (msg->request & ~DP_AUX_I2C_MOT) {
951 case DP_AUX_NATIVE_WRITE:
952 case DP_AUX_I2C_WRITE:
a6c8aff0 953 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 954 rxsize = 1;
f51a44b9 955
9d1a1031
JN
956 if (WARN_ON(txsize > 20))
957 return -E2BIG;
a4fc5ed6 958
9d1a1031 959 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 960
9d1a1031
JN
961 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
962 if (ret > 0) {
963 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 964
9d1a1031
JN
965 /* Return payload size. */
966 ret = msg->size;
967 }
968 break;
46a5ae9f 969
9d1a1031
JN
970 case DP_AUX_NATIVE_READ:
971 case DP_AUX_I2C_READ:
a6c8aff0 972 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 973 rxsize = msg->size + 1;
a4fc5ed6 974
9d1a1031
JN
975 if (WARN_ON(rxsize > 20))
976 return -E2BIG;
a4fc5ed6 977
9d1a1031
JN
978 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
979 if (ret > 0) {
980 msg->reply = rxbuf[0] >> 4;
981 /*
982 * Assume happy day, and copy the data. The caller is
983 * expected to check msg->reply before touching it.
984 *
985 * Return payload size.
986 */
987 ret--;
988 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 989 }
9d1a1031
JN
990 break;
991
992 default:
993 ret = -EINVAL;
994 break;
a4fc5ed6 995 }
f51a44b9 996
9d1a1031 997 return ret;
a4fc5ed6
KP
998}
999
9d1a1031
JN
1000static void
1001intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1002{
1003 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1004 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1005 enum port port = intel_dig_port->port;
0b99836f 1006 const char *name = NULL;
ab2c0672
DA
1007 int ret;
1008
33ad6626
JN
1009 switch (port) {
1010 case PORT_A:
1011 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1012 name = "DPDDC-A";
ab2c0672 1013 break;
33ad6626
JN
1014 case PORT_B:
1015 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1016 name = "DPDDC-B";
ab2c0672 1017 break;
33ad6626
JN
1018 case PORT_C:
1019 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1020 name = "DPDDC-C";
ab2c0672 1021 break;
33ad6626
JN
1022 case PORT_D:
1023 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1024 name = "DPDDC-D";
33ad6626
JN
1025 break;
1026 default:
1027 BUG();
ab2c0672
DA
1028 }
1029
1b1aad75
DL
1030 /*
1031 * The AUX_CTL register is usually DP_CTL + 0x10.
1032 *
1033 * On Haswell and Broadwell though:
1034 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1035 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1036 *
1037 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1038 */
1039 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1040 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1041
0b99836f 1042 intel_dp->aux.name = name;
9d1a1031
JN
1043 intel_dp->aux.dev = dev->dev;
1044 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1045
0b99836f
JN
1046 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1047 connector->base.kdev->kobj.name);
8316f337 1048
4f71d0cb 1049 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1050 if (ret < 0) {
4f71d0cb 1051 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1052 name, ret);
1053 return;
ab2c0672 1054 }
8a5e6aeb 1055
0b99836f
JN
1056 ret = sysfs_create_link(&connector->base.kdev->kobj,
1057 &intel_dp->aux.ddc.dev.kobj,
1058 intel_dp->aux.ddc.dev.kobj.name);
1059 if (ret < 0) {
1060 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1061 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1062 }
a4fc5ed6
KP
1063}
1064
80f65de3
ID
1065static void
1066intel_dp_connector_unregister(struct intel_connector *intel_connector)
1067{
1068 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1069
0e32b39c
DA
1070 if (!intel_connector->mst_port)
1071 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1072 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1073 intel_connector_unregister(intel_connector);
1074}
1075
5416d871 1076static void
c3346ef6 1077skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1078{
1079 u32 ctrl1;
1080
1081 pipe_config->ddi_pll_sel = SKL_DPLL0;
1082 pipe_config->dpll_hw_state.cfgcr1 = 0;
1083 pipe_config->dpll_hw_state.cfgcr2 = 0;
1084
1085 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1086 switch (link_clock / 2) {
1087 case 81000:
5416d871
DL
1088 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1089 SKL_DPLL0);
1090 break;
c3346ef6 1091 case 135000:
5416d871
DL
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1093 SKL_DPLL0);
1094 break;
c3346ef6 1095 case 270000:
5416d871
DL
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1097 SKL_DPLL0);
1098 break;
c3346ef6
SJ
1099 case 162000:
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1101 SKL_DPLL0);
1102 break;
 1103 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
 1104 results in a CDCLK change. The CDCLK change needs to be handled by
 1105 disabling pipes and re-enabling them */
1106 case 108000:
1107 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1108 SKL_DPLL0);
1109 break;
1110 case 216000:
1111 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1112 SKL_DPLL0);
1113 break;
1114
5416d871
DL
1115 }
1116 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1117}
1118
0e50338c 1119static void
5cec258b 1120hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1121{
1122 switch (link_bw) {
1123 case DP_LINK_BW_1_62:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1125 break;
1126 case DP_LINK_BW_2_7:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1128 break;
1129 case DP_LINK_BW_5_4:
1130 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1131 break;
1132 }
1133}
1134
fc0f8e25 1135static int
12f6a2e2 1136intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1137{
12f6a2e2
VS
1138 if (intel_dp->num_supported_rates) {
1139 *sink_rates = intel_dp->supported_rates;
ea2d8a42 1140 return intel_dp->num_supported_rates;
fc0f8e25 1141 }
12f6a2e2
VS
1142
1143 *sink_rates = default_rates;
1144
1145 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1146}
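/*
 * Illustrative note (not from the original source): the "(max_link_bw >> 3) + 1"
 * trick relies on the DPCD link-bw codes being 0x06 (1.62 GHz), 0x0a (2.7 GHz)
 * and 0x14 (5.4 GHz), which shift down to 0, 1 and 2 respectively. Adding one
 * therefore yields how many entries of default_rates[] the sink supports, e.g.
 * DP_LINK_BW_2_7 -> (0x0a >> 3) + 1 == 2 -> { 162000, 270000 }.
 */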
1147
a8f3ef61 1148static int
1db10e28 1149intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1150{
636280ba
VS
1151 if (INTEL_INFO(dev)->gen >= 9) {
1152 *source_rates = gen9_rates;
1153 return ARRAY_SIZE(gen9_rates);
fe51bfb9
VS
1154 } else if (IS_CHERRYVIEW(dev)) {
1155 *source_rates = chv_rates;
1156 return ARRAY_SIZE(chv_rates);
a8f3ef61 1157 }
636280ba
VS
1158
1159 *source_rates = default_rates;
1160
1db10e28
VS
1161 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1162 /* WaDisableHBR2:skl */
1163 return (DP_LINK_BW_2_7 >> 3) + 1;
1164 else if (INTEL_INFO(dev)->gen >= 8 ||
1165 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1166 return (DP_LINK_BW_5_4 >> 3) + 1;
1167 else
1168 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1169}
1170
c6bb3538
DV
1171static void
1172intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1173 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1174{
1175 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1176 const struct dp_link_dpll *divisor = NULL;
1177 int i, count = 0;
c6bb3538
DV
1178
1179 if (IS_G4X(dev)) {
9dd4ffdf
CML
1180 divisor = gen4_dpll;
1181 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1182 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1183 divisor = pch_dpll;
1184 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1185 } else if (IS_CHERRYVIEW(dev)) {
1186 divisor = chv_dpll;
1187 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1188 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1189 divisor = vlv_dpll;
1190 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1191 }
9dd4ffdf
CML
1192
1193 if (divisor && count) {
1194 for (i = 0; i < count; i++) {
1195 if (link_bw == divisor[i].link_bw) {
1196 pipe_config->dpll = divisor[i].dpll;
1197 pipe_config->clock_set = true;
1198 break;
1199 }
1200 }
c6bb3538
DV
1201 }
1202}
1203
2ecae76a
VS
1204static int intersect_rates(const int *source_rates, int source_len,
1205 const int *sink_rates, int sink_len,
1206 int *supported_rates)
a8f3ef61
SJ
1207{
1208 int i = 0, j = 0, k = 0;
1209
a8f3ef61
SJ
1210 while (i < source_len && j < sink_len) {
1211 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1212 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1213 return k;
a8f3ef61
SJ
1214 supported_rates[k] = source_rates[i];
1215 ++k;
1216 ++i;
1217 ++j;
1218 } else if (source_rates[i] < sink_rates[j]) {
1219 ++i;
1220 } else {
1221 ++j;
1222 }
1223 }
1224 return k;
1225}
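/*
 * Illustrative note (not from the original source): both input arrays are
 * assumed to be sorted in ascending order, so this is a simple merge-style
 * walk. For example, intersecting chv_rates[] with a sink reporting
 * default_rates[] ({ 162000, 270000, 540000 }) yields those same three rates,
 * since all of them appear in chv_rates[].
 */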
1226
2ecae76a
VS
1227static int intel_supported_rates(struct intel_dp *intel_dp,
1228 int *supported_rates)
1229{
1230 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1231 const int *source_rates, *sink_rates;
1232 int source_len, sink_len;
1233
1234 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1235 source_len = intel_dp_source_rates(dev, &source_rates);
1236
1237 return intersect_rates(source_rates, source_len,
1238 sink_rates, sink_len,
1239 supported_rates);
1240}
1241
f4896f15 1242static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1243{
1244 int i = 0;
1245
1246 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1247 if (find == rates[i])
1248 break;
1249
1250 return i;
1251}
1252
50fec21a
VS
1253int
1254intel_dp_max_link_rate(struct intel_dp *intel_dp)
1255{
1256 int rates[DP_MAX_SUPPORTED_RATES] = {};
1257 int len;
1258
1259 len = intel_supported_rates(intel_dp, rates);
1260 if (WARN_ON(len <= 0))
1261 return 162000;
1262
1263 return rates[rate_to_index(0, rates) - 1];
1264}
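/*
 * Illustrative note (not from the original source): rates[] above is
 * zero-initialized and filled with the common rates in ascending order, so
 * rate_to_index(0, rates) returns the index of the first unused (zero) slot,
 * i.e. the number of valid entries (or DP_MAX_SUPPORTED_RATES if the array
 * is full). Indexing with that value minus one therefore picks the last,
 * and thus highest, supported link rate.
 */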
1265
ed4e9c1d
VS
1266int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1267{
1268 return rate_to_index(rate, intel_dp->supported_rates);
1269}
1270
00c09d70 1271bool
5bfe2ac0 1272intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1273 struct intel_crtc_state *pipe_config)
a4fc5ed6 1274{
5bfe2ac0 1275 struct drm_device *dev = encoder->base.dev;
36008365 1276 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1277 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1278 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1279 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 1280 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 1281 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1282 int lane_count, clock;
56071a20 1283 int min_lane_count = 1;
eeb6324d 1284 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1285 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1286 int min_clock = 0;
a8f3ef61 1287 int max_clock;
083f9560 1288 int bpp, mode_rate;
ff9a6750 1289 int link_avail, link_clock;
2ecae76a
VS
1290 int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
1291 int supported_len;
a8f3ef61 1292
2ecae76a 1293 supported_len = intel_supported_rates(intel_dp, supported_rates);
a8f3ef61
SJ
1294
1295 /* No common link rates between source and sink */
1296 WARN_ON(supported_len <= 0);
1297
1298 max_clock = supported_len - 1;
a4fc5ed6 1299
bc7d38a4 1300 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1301 pipe_config->has_pch_encoder = true;
1302
03afc4a2 1303 pipe_config->has_dp_encoder = true;
f769cd24 1304 pipe_config->has_drrs = false;
9ed109a7 1305 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1306
dd06f90e
JN
1307 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1308 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1309 adjusted_mode);
2dd24552
JB
1310 if (!HAS_PCH_SPLIT(dev))
1311 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1312 intel_connector->panel.fitting_mode);
1313 else
b074cec8
JB
1314 intel_pch_panel_fitting(intel_crtc, pipe_config,
1315 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1316 }
1317
cb1793ce 1318 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1319 return false;
1320
083f9560 1321 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61
SJ
1322 "max bw %d pixel clock %iKHz\n",
1323 max_lane_count, supported_rates[max_clock],
241bfc38 1324 adjusted_mode->crtc_clock);
083f9560 1325
36008365
DV
1326 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1327 * bpc in between. */
3e7ca985 1328 bpp = pipe_config->pipe_bpp;
56071a20
JN
1329 if (is_edp(intel_dp)) {
1330 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1331 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1332 dev_priv->vbt.edp_bpp);
1333 bpp = dev_priv->vbt.edp_bpp;
1334 }
1335
344c5bbc
JN
1336 /*
1337 * Use the maximum clock and number of lanes the eDP panel
 1338 * advertises being capable of. The panels are generally
1339 * designed to support only a single clock and lane
1340 * configuration, and typically these values correspond to the
1341 * native resolution of the panel.
1342 */
1343 min_lane_count = max_lane_count;
1344 min_clock = max_clock;
7984211e 1345 }
657445fe 1346
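	/*
	 * Illustrative note (not from the original source): the search below
	 * walks bpp from the pipe's maximum downwards, and for each bpp tries
	 * link clocks and then lane counts in ascending order, stopping at the
	 * first combination whose bandwidth covers mode_rate. The result is
	 * the highest bpp that fits, using the lowest link clock and fewest
	 * lanes that can carry it.
	 */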
36008365 1347 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1348 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1349 bpp);
36008365 1350
c6930992 1351 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1352 for (lane_count = min_lane_count;
1353 lane_count <= max_lane_count;
1354 lane_count <<= 1) {
1355
1356 link_clock = supported_rates[clock];
36008365
DV
1357 link_avail = intel_dp_max_data_rate(link_clock,
1358 lane_count);
1359
1360 if (mode_rate <= link_avail) {
1361 goto found;
1362 }
1363 }
1364 }
1365 }
c4867936 1366
36008365 1367 return false;
3685a8f3 1368
36008365 1369found:
55bc60db
VS
1370 if (intel_dp->color_range_auto) {
1371 /*
1372 * See:
1373 * CEA-861-E - 5.1 Default Encoding Parameters
1374 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1375 */
18316c8c 1376 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1377 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1378 else
1379 intel_dp->color_range = 0;
1380 }
1381
3685a8f3 1382 if (intel_dp->color_range)
50f3b016 1383 pipe_config->limited_color_range = true;
a4fc5ed6 1384
36008365 1385 intel_dp->lane_count = lane_count;
a8f3ef61 1386
bc27b7d3
VS
1387 if (intel_dp->num_supported_rates) {
1388 intel_dp->link_bw = 0;
a8f3ef61 1389 intel_dp->rate_select =
ed4e9c1d 1390 intel_dp_rate_select(intel_dp, supported_rates[clock]);
bc27b7d3
VS
1391 } else {
1392 intel_dp->link_bw =
1393 drm_dp_link_rate_to_bw_code(supported_rates[clock]);
1394 intel_dp->rate_select = 0;
a8f3ef61
SJ
1395 }
1396
657445fe 1397 pipe_config->pipe_bpp = bpp;
a8f3ef61 1398 pipe_config->port_clock = supported_rates[clock];
a4fc5ed6 1399
36008365
DV
1400 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1401 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1402 pipe_config->port_clock, bpp);
36008365
DV
1403 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1404 mode_rate, link_avail);
a4fc5ed6 1405
03afc4a2 1406 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1407 adjusted_mode->crtc_clock,
1408 pipe_config->port_clock,
03afc4a2 1409 &pipe_config->dp_m_n);
9d1a455b 1410
439d7ac0 1411 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1412 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1413 pipe_config->has_drrs = true;
439d7ac0
PB
1414 intel_link_compute_m_n(bpp, lane_count,
1415 intel_connector->panel.downclock_mode->clock,
1416 pipe_config->port_clock,
1417 &pipe_config->dp_m2_n2);
1418 }
1419
5416d871 1420 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
c3346ef6 1421 skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
5416d871 1422 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1423 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1424 else
1425 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1426
03afc4a2 1427 return true;
a4fc5ed6
KP
1428}
1429
7c62a164 1430static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1431{
7c62a164
DV
1432 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1433 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1434 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1435 struct drm_i915_private *dev_priv = dev->dev_private;
1436 u32 dpa_ctl;
1437
6e3c9717
ACO
1438 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1439 crtc->config->port_clock);
ea9b6006
DV
1440 dpa_ctl = I915_READ(DP_A);
1441 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1442
6e3c9717 1443 if (crtc->config->port_clock == 162000) {
1ce17038
DV
 1444 /* For a long time we've carried around an ILK-DevA w/a for the
1445 * 160MHz clock. If we're really unlucky, it's still required.
1446 */
1447 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1448 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1449 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1450 } else {
1451 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1452 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1453 }
1ce17038 1454
ea9b6006
DV
1455 I915_WRITE(DP_A, dpa_ctl);
1456
1457 POSTING_READ(DP_A);
1458 udelay(500);
1459}
1460
8ac33ed3 1461static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1462{
b934223d 1463 struct drm_device *dev = encoder->base.dev;
417e822d 1464 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1465 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1466 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1467 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1468 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1469
417e822d 1470 /*
1a2eb460 1471 * There are four kinds of DP registers:
417e822d
KP
1472 *
1473 * IBX PCH
1a2eb460
KP
1474 * SNB CPU
1475 * IVB CPU
417e822d
KP
1476 * CPT PCH
1477 *
1478 * IBX PCH and CPU are the same for almost everything,
1479 * except that the CPU DP PLL is configured in this
1480 * register
1481 *
1482 * CPT PCH is quite different, having many bits moved
1483 * to the TRANS_DP_CTL register instead. That
1484 * configuration happens (oddly) in ironlake_pch_enable
1485 */
9c9e7927 1486
417e822d
KP
1487 /* Preserve the BIOS-computed detected bit. This is
1488 * supposed to be read-only.
1489 */
1490 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1491
417e822d 1492 /* Handle DP bits in common between all three register formats */
417e822d 1493 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1494 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1495
6e3c9717 1496 if (crtc->config->has_audio)
ea5b213a 1497 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1498
417e822d 1499 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1500
bc7d38a4 1501 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1502 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1503 intel_dp->DP |= DP_SYNC_HS_HIGH;
1504 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1505 intel_dp->DP |= DP_SYNC_VS_HIGH;
1506 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1507
6aba5b6c 1508 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1509 intel_dp->DP |= DP_ENHANCED_FRAMING;
1510
7c62a164 1511 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1512 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1513 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1514 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1515
1516 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1517 intel_dp->DP |= DP_SYNC_HS_HIGH;
1518 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1519 intel_dp->DP |= DP_SYNC_VS_HIGH;
1520 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1521
6aba5b6c 1522 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1523 intel_dp->DP |= DP_ENHANCED_FRAMING;
1524
44f37d1f
CML
1525 if (!IS_CHERRYVIEW(dev)) {
1526 if (crtc->pipe == 1)
1527 intel_dp->DP |= DP_PIPEB_SELECT;
1528 } else {
1529 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1530 }
417e822d
KP
1531 } else {
1532 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1533 }
a4fc5ed6
KP
1534}
1535
ffd6749d
PZ
1536#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1537#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1538
1a5ef5b7
PZ
1539#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1540#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1541
ffd6749d
PZ
1542#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1543#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1544
4be73780 1545static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1546 u32 mask,
1547 u32 value)
bd943159 1548{
30add22d 1549 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1550 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1551 u32 pp_stat_reg, pp_ctrl_reg;
1552
e39b999a
VS
1553 lockdep_assert_held(&dev_priv->pps_mutex);
1554
bf13e81b
JN
1555 pp_stat_reg = _pp_stat_reg(intel_dp);
1556 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1557
99ea7127 1558 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1559 mask, value,
1560 I915_READ(pp_stat_reg),
1561 I915_READ(pp_ctrl_reg));
32ce697c 1562
453c5420 1563 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1564 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1565 I915_READ(pp_stat_reg),
1566 I915_READ(pp_ctrl_reg));
32ce697c 1567 }
54c136d4
CW
1568
1569 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1570}
32ce697c 1571
4be73780 1572static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1573{
1574 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1575 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1576}
1577
4be73780 1578static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1579{
1580 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1581 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1582}
1583
4be73780 1584static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1585{
1586 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1587
1588 /* When we disable the VDD override bit last we have to do the manual
1589 * wait. */
1590 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1591 intel_dp->panel_power_cycle_delay);
1592
4be73780 1593 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1594}
1595
4be73780 1596static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1597{
1598 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1599 intel_dp->backlight_on_delay);
1600}
1601
4be73780 1602static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1603{
1604 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1605 intel_dp->backlight_off_delay);
1606}
99ea7127 1607
832dd3c1
KP
1608/* Read the current pp_control value, unlocking the register if it
1609 * is locked
1610 */
1611
453c5420 1612static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1613{
453c5420
JB
1614 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1615 struct drm_i915_private *dev_priv = dev->dev_private;
1616 u32 control;
832dd3c1 1617
e39b999a
VS
1618 lockdep_assert_held(&dev_priv->pps_mutex);
1619
bf13e81b 1620 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1621 control &= ~PANEL_UNLOCK_MASK;
1622 control |= PANEL_UNLOCK_REGS;
1623 return control;
bd943159
KP
1624}
1625
951468f3
VS
1626/*
1627 * Must be paired with edp_panel_vdd_off().
1628 * Must hold pps_mutex around the whole on/off sequence.
1629 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1630 */
1e0560e0 1631static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1632{
30add22d 1633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1634 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1635 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1636 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1637 enum intel_display_power_domain power_domain;
5d613501 1638 u32 pp;
453c5420 1639 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1640 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1641
e39b999a
VS
1642 lockdep_assert_held(&dev_priv->pps_mutex);
1643
97af61f5 1644 if (!is_edp(intel_dp))
adddaaf4 1645 return false;
bd943159 1646
2c623c11 1647 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1648 intel_dp->want_panel_vdd = true;
99ea7127 1649
4be73780 1650 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1651 return need_to_disable;
b0665d57 1652
4e6e1a54
ID
1653 power_domain = intel_display_port_power_domain(intel_encoder);
1654 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1655
3936fcf4
VS
1656 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1657 port_name(intel_dig_port->port));
bd943159 1658
4be73780
DV
1659 if (!edp_have_panel_power(intel_dp))
1660 wait_panel_power_cycle(intel_dp);
99ea7127 1661
453c5420 1662 pp = ironlake_get_pp_control(intel_dp);
5d613501 1663 pp |= EDP_FORCE_VDD;
ebf33b18 1664
bf13e81b
JN
1665 pp_stat_reg = _pp_stat_reg(intel_dp);
1666 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1667
1668 I915_WRITE(pp_ctrl_reg, pp);
1669 POSTING_READ(pp_ctrl_reg);
1670 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1671 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1672 /*
1673 * If the panel wasn't on, delay before accessing aux channel
1674 */
4be73780 1675 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1676 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1677 port_name(intel_dig_port->port));
f01eca2e 1678 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1679 }
adddaaf4
JN
1680
1681 return need_to_disable;
1682}
1683
951468f3
VS
1684/*
1685 * Must be paired with intel_edp_panel_vdd_off() or
1686 * intel_edp_panel_off().
1687 * Nested calls to these functions are not allowed since
1688 * we drop the lock. Caller must use some higher level
1689 * locking to prevent nested calls from other threads.
1690 */
b80d6c78 1691void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1692{
c695b6b6 1693 bool vdd;
adddaaf4 1694
c695b6b6
VS
1695 if (!is_edp(intel_dp))
1696 return;
1697
773538e8 1698 pps_lock(intel_dp);
c695b6b6 1699 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1700 pps_unlock(intel_dp);
c695b6b6 1701
e2c719b7 1702 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1703 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1704}
1705
4be73780 1706static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1707{
30add22d 1708 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1709 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1710 struct intel_digital_port *intel_dig_port =
1711 dp_to_dig_port(intel_dp);
1712 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1713 enum intel_display_power_domain power_domain;
5d613501 1714 u32 pp;
453c5420 1715 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1716
e39b999a 1717 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1718
15e899a0 1719 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1720
15e899a0 1721 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1722 return;
b0665d57 1723
3936fcf4
VS
1724 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1725 port_name(intel_dig_port->port));
bd943159 1726
be2c9196
VS
1727 pp = ironlake_get_pp_control(intel_dp);
1728 pp &= ~EDP_FORCE_VDD;
453c5420 1729
be2c9196
VS
1730 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1731 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1732
be2c9196
VS
1733 I915_WRITE(pp_ctrl_reg, pp);
1734 POSTING_READ(pp_ctrl_reg);
90791a5c 1735
be2c9196
VS
1736 /* Make sure sequencer is idle before allowing subsequent activity */
1737 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1738 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1739
be2c9196
VS
1740 if ((pp & POWER_TARGET_ON) == 0)
1741 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1742
be2c9196
VS
1743 power_domain = intel_display_port_power_domain(intel_encoder);
1744 intel_display_power_put(dev_priv, power_domain);
bd943159 1745}
5d613501 1746
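 /* Delayed work handler: drop VDD again if nobody has requested it in the meantime. */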
4be73780 1747static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1748{
1749 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1750 struct intel_dp, panel_vdd_work);
bd943159 1751
773538e8 1752 pps_lock(intel_dp);
15e899a0
VS
1753 if (!intel_dp->want_panel_vdd)
1754 edp_panel_vdd_off_sync(intel_dp);
773538e8 1755 pps_unlock(intel_dp);
bd943159
KP
1756}
1757
aba86890
ID
1758static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1759{
1760 unsigned long delay;
1761
1762 /*
1763 * Queue the timer to fire a long time from now (relative to the power
1764 * down delay) to keep the panel power up across a sequence of
1765 * operations.
1766 */
1767 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1768 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1769}
1770
951468f3
VS
1771/*
1772 * Must be paired with edp_panel_vdd_on().
1773 * Must hold pps_mutex around the whole on/off sequence.
1774 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1775 */
4be73780 1776static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1777{
e39b999a
VS
1778 struct drm_i915_private *dev_priv =
1779 intel_dp_to_dev(intel_dp)->dev_private;
1780
1781 lockdep_assert_held(&dev_priv->pps_mutex);
1782
97af61f5
KP
1783 if (!is_edp(intel_dp))
1784 return;
5d613501 1785
e2c719b7 1786 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1787 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1788
bd943159
KP
1789 intel_dp->want_panel_vdd = false;
1790
aba86890 1791 if (sync)
4be73780 1792 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1793 else
1794 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1795}
1796
9f0fb5be 1797static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1798{
30add22d 1799 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1800 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1801 u32 pp;
453c5420 1802 u32 pp_ctrl_reg;
9934c132 1803
9f0fb5be
VS
1804 lockdep_assert_held(&dev_priv->pps_mutex);
1805
97af61f5 1806 if (!is_edp(intel_dp))
bd943159 1807 return;
99ea7127 1808
3936fcf4
VS
1809 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1810 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1811
e7a89ace
VS
1812 if (WARN(edp_have_panel_power(intel_dp),
1813 "eDP port %c panel power already on\n",
1814 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1815 return;
9934c132 1816
4be73780 1817 wait_panel_power_cycle(intel_dp);
37c6c9b0 1818
bf13e81b 1819 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1820 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1821 if (IS_GEN5(dev)) {
1822 /* ILK workaround: disable reset around power sequence */
1823 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1824 I915_WRITE(pp_ctrl_reg, pp);
1825 POSTING_READ(pp_ctrl_reg);
05ce1a49 1826 }
37c6c9b0 1827
1c0ae80a 1828 pp |= POWER_TARGET_ON;
99ea7127
KP
1829 if (!IS_GEN5(dev))
1830 pp |= PANEL_POWER_RESET;
1831
453c5420
JB
1832 I915_WRITE(pp_ctrl_reg, pp);
1833 POSTING_READ(pp_ctrl_reg);
9934c132 1834
4be73780 1835 wait_panel_on(intel_dp);
dce56b3c 1836 intel_dp->last_power_on = jiffies;
9934c132 1837
05ce1a49
KP
1838 if (IS_GEN5(dev)) {
1839 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1840 I915_WRITE(pp_ctrl_reg, pp);
1841 POSTING_READ(pp_ctrl_reg);
05ce1a49 1842 }
9f0fb5be 1843}
e39b999a 1844
9f0fb5be
VS
1845void intel_edp_panel_on(struct intel_dp *intel_dp)
1846{
1847 if (!is_edp(intel_dp))
1848 return;
1849
1850 pps_lock(intel_dp);
1851 edp_panel_on(intel_dp);
773538e8 1852 pps_unlock(intel_dp);
9934c132
JB
1853}
1854
9f0fb5be
VS
1855
1856static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1857{
4e6e1a54
ID
1858 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1859 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1860 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1861 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1862 enum intel_display_power_domain power_domain;
99ea7127 1863 u32 pp;
453c5420 1864 u32 pp_ctrl_reg;
9934c132 1865
9f0fb5be
VS
1866 lockdep_assert_held(&dev_priv->pps_mutex);
1867
97af61f5
KP
1868 if (!is_edp(intel_dp))
1869 return;
37c6c9b0 1870
3936fcf4
VS
1871 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1872 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1873
3936fcf4
VS
1874 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1875 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1876
453c5420 1877 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1878 /* We need to switch off panel power _and_ force vdd, for otherwise some
1879 * panels get very unhappy and cease to work. */
b3064154
PJ
1880 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1881 EDP_BLC_ENABLE);
453c5420 1882
bf13e81b 1883 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1884
849e39f5
PZ
1885 intel_dp->want_panel_vdd = false;
1886
453c5420
JB
1887 I915_WRITE(pp_ctrl_reg, pp);
1888 POSTING_READ(pp_ctrl_reg);
9934c132 1889
dce56b3c 1890 intel_dp->last_power_cycle = jiffies;
4be73780 1891 wait_panel_off(intel_dp);
849e39f5
PZ
1892
1893 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1894 power_domain = intel_display_port_power_domain(intel_encoder);
1895 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1896}
e39b999a 1897
9f0fb5be
VS
1898void intel_edp_panel_off(struct intel_dp *intel_dp)
1899{
1900 if (!is_edp(intel_dp))
1901 return;
e39b999a 1902
9f0fb5be
VS
1903 pps_lock(intel_dp);
1904 edp_panel_off(intel_dp);
773538e8 1905 pps_unlock(intel_dp);
9934c132
JB
1906}
1907
1250d107
JN
1908/* Enable backlight in the panel power control. */
1909static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1910{
da63a9f2
PZ
1911 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1912 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1913 struct drm_i915_private *dev_priv = dev->dev_private;
1914 u32 pp;
453c5420 1915 u32 pp_ctrl_reg;
32f9d658 1916
01cb9ea6
JB
1917 /*
1918 * If we enable the backlight right away following a panel power
1919 * on, we may see slight flicker as the panel syncs with the eDP
1920 * link. So delay a bit to make sure the image is solid before
1921 * allowing it to appear.
1922 */
4be73780 1923 wait_backlight_on(intel_dp);
e39b999a 1924
773538e8 1925 pps_lock(intel_dp);
e39b999a 1926
453c5420 1927 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1928 pp |= EDP_BLC_ENABLE;
453c5420 1929
bf13e81b 1930 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1931
1932 I915_WRITE(pp_ctrl_reg, pp);
1933 POSTING_READ(pp_ctrl_reg);
e39b999a 1934
773538e8 1935 pps_unlock(intel_dp);
32f9d658
ZW
1936}
1937
1250d107
JN
1938/* Enable backlight PWM and backlight PP control. */
1939void intel_edp_backlight_on(struct intel_dp *intel_dp)
1940{
1941 if (!is_edp(intel_dp))
1942 return;
1943
1944 DRM_DEBUG_KMS("\n");
1945
1946 intel_panel_enable_backlight(intel_dp->attached_connector);
1947 _intel_edp_backlight_on(intel_dp);
1948}
1949
1950/* Disable backlight in the panel power control. */
1951static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1952{
30add22d 1953 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1954 struct drm_i915_private *dev_priv = dev->dev_private;
1955 u32 pp;
453c5420 1956 u32 pp_ctrl_reg;
32f9d658 1957
f01eca2e
KP
1958 if (!is_edp(intel_dp))
1959 return;
1960
773538e8 1961 pps_lock(intel_dp);
e39b999a 1962
453c5420 1963 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1964 pp &= ~EDP_BLC_ENABLE;
453c5420 1965
bf13e81b 1966 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1967
1968 I915_WRITE(pp_ctrl_reg, pp);
1969 POSTING_READ(pp_ctrl_reg);
f7d2323c 1970
773538e8 1971 pps_unlock(intel_dp);
e39b999a
VS
1972
1973 intel_dp->last_backlight_off = jiffies;
f7d2323c 1974 edp_wait_backlight_off(intel_dp);
1250d107 1975}
f7d2323c 1976
1250d107
JN
1977/* Disable backlight PP control and backlight PWM. */
1978void intel_edp_backlight_off(struct intel_dp *intel_dp)
1979{
1980 if (!is_edp(intel_dp))
1981 return;
1982
1983 DRM_DEBUG_KMS("\n");
f7d2323c 1984
1250d107 1985 _intel_edp_backlight_off(intel_dp);
f7d2323c 1986 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1987}
a4fc5ed6 1988
73580fb7
JN
1989/*
1990 * Hook for controlling the panel power control backlight through the bl_power
1991 * sysfs attribute. Take care to handle multiple calls.
1992 */
1993static void intel_edp_backlight_power(struct intel_connector *connector,
1994 bool enable)
1995{
1996 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1997 bool is_enabled;
1998
773538e8 1999 pps_lock(intel_dp);
e39b999a 2000 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2001 pps_unlock(intel_dp);
73580fb7
JN
2002
2003 if (is_enabled == enable)
2004 return;
2005
23ba9373
JN
2006 DRM_DEBUG_KMS("panel power control backlight %s\n",
2007 enable ? "enable" : "disable");
73580fb7
JN
2008
2009 if (enable)
2010 _intel_edp_backlight_on(intel_dp);
2011 else
2012 _intel_edp_backlight_off(intel_dp);
2013}
2014
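 /* Enable the CPU eDP PLL (DP_A); the pipe must still be disabled at this point. */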
2bd2ad64 2015static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2016{
da63a9f2
PZ
2017 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2018 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2019 struct drm_device *dev = crtc->dev;
d240f20f
JB
2020 struct drm_i915_private *dev_priv = dev->dev_private;
2021 u32 dpa_ctl;
2022
2bd2ad64
DV
2023 assert_pipe_disabled(dev_priv,
2024 to_intel_crtc(crtc)->pipe);
2025
d240f20f
JB
2026 DRM_DEBUG_KMS("\n");
2027 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2028 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2029 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2030
2031 /* We don't adjust intel_dp->DP while tearing down the link, to
2032 * facilitate link retraining (e.g. after hotplug). Hence clear all
2033 * enable bits here to ensure that we don't enable too much. */
2034 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2035 intel_dp->DP |= DP_PLL_ENABLE;
2036 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2037 POSTING_READ(DP_A);
2038 udelay(200);
d240f20f
JB
2039}
2040
2bd2ad64 2041static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2042{
da63a9f2
PZ
2043 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2044 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2045 struct drm_device *dev = crtc->dev;
d240f20f
JB
2046 struct drm_i915_private *dev_priv = dev->dev_private;
2047 u32 dpa_ctl;
2048
2bd2ad64
DV
2049 assert_pipe_disabled(dev_priv,
2050 to_intel_crtc(crtc)->pipe);
2051
d240f20f 2052 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2053 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2054 "dp pll off, should be on\n");
2055 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2056
2057 /* We can't rely on the value tracked for the DP register in
2058 * intel_dp->DP because link_down must not change that (otherwise link
 2059 * re-training will fail). */
298b0b39 2060 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2061 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2062 POSTING_READ(DP_A);
d240f20f
JB
2063 udelay(200);
2064}
2065
c7ad3810 2066/* If the sink supports it, try to set the power state appropriately */
c19b0669 2067void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2068{
2069 int ret, i;
2070
2071 /* Should have a valid DPCD by this point */
2072 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2073 return;
2074
2075 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2076 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2077 DP_SET_POWER_D3);
c7ad3810
JB
2078 } else {
2079 /*
2080 * When turning on, we need to retry for 1ms to give the sink
2081 * time to wake up.
2082 */
2083 for (i = 0; i < 3; i++) {
9d1a1031
JN
2084 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2085 DP_SET_POWER_D0);
c7ad3810
JB
2086 if (ret == 1)
2087 break;
2088 msleep(1);
2089 }
2090 }
f9cac721
JN
2091
2092 if (ret != 1)
2093 DRM_DEBUG_KMS("failed to %s sink power state\n",
2094 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2095}
2096
19d8fe15
DV
2097static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2098 enum pipe *pipe)
d240f20f 2099{
19d8fe15 2100 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2101 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2102 struct drm_device *dev = encoder->base.dev;
2103 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2104 enum intel_display_power_domain power_domain;
2105 u32 tmp;
2106
2107 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2108 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2109 return false;
2110
2111 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2112
2113 if (!(tmp & DP_PORT_EN))
2114 return false;
2115
bc7d38a4 2116 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2117 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2118 } else if (IS_CHERRYVIEW(dev)) {
2119 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2120 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2121 *pipe = PORT_TO_PIPE(tmp);
2122 } else {
2123 u32 trans_sel;
2124 u32 trans_dp;
2125 int i;
2126
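 /* On CPT the pipe is not encoded in the port register; find the
 * transcoder whose TRANS_DP_CTL port select matches this port. */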
2127 switch (intel_dp->output_reg) {
2128 case PCH_DP_B:
2129 trans_sel = TRANS_DP_PORT_SEL_B;
2130 break;
2131 case PCH_DP_C:
2132 trans_sel = TRANS_DP_PORT_SEL_C;
2133 break;
2134 case PCH_DP_D:
2135 trans_sel = TRANS_DP_PORT_SEL_D;
2136 break;
2137 default:
2138 return true;
2139 }
2140
055e393f 2141 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2142 trans_dp = I915_READ(TRANS_DP_CTL(i));
2143 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2144 *pipe = i;
2145 return true;
2146 }
2147 }
19d8fe15 2148
4a0833ec
DV
2149 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2150 intel_dp->output_reg);
2151 }
d240f20f 2152
19d8fe15
DV
2153 return true;
2154}
d240f20f 2155
045ac3b5 2156static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2157 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2158{
2159 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2160 u32 tmp, flags = 0;
63000ef6
XZ
2161 struct drm_device *dev = encoder->base.dev;
2162 struct drm_i915_private *dev_priv = dev->dev_private;
2163 enum port port = dp_to_dig_port(intel_dp)->port;
2164 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2165 int dotclock;
045ac3b5 2166
9ed109a7
DV
2167 tmp = I915_READ(intel_dp->output_reg);
2168 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2169 pipe_config->has_audio = true;
2170
63000ef6 2171 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2172 if (tmp & DP_SYNC_HS_HIGH)
2173 flags |= DRM_MODE_FLAG_PHSYNC;
2174 else
2175 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2176
63000ef6
XZ
2177 if (tmp & DP_SYNC_VS_HIGH)
2178 flags |= DRM_MODE_FLAG_PVSYNC;
2179 else
2180 flags |= DRM_MODE_FLAG_NVSYNC;
2181 } else {
2182 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2183 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2184 flags |= DRM_MODE_FLAG_PHSYNC;
2185 else
2186 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2187
63000ef6
XZ
2188 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2189 flags |= DRM_MODE_FLAG_PVSYNC;
2190 else
2191 flags |= DRM_MODE_FLAG_NVSYNC;
2192 }
045ac3b5 2193
2d112de7 2194 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2195
8c875fca
VS
2196 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2197 tmp & DP_COLOR_RANGE_16_235)
2198 pipe_config->limited_color_range = true;
2199
eb14cb74
VS
2200 pipe_config->has_dp_encoder = true;
2201
2202 intel_dp_get_m_n(crtc, pipe_config);
2203
18442d08 2204 if (port == PORT_A) {
f1f644dc
JB
2205 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2206 pipe_config->port_clock = 162000;
2207 else
2208 pipe_config->port_clock = 270000;
2209 }
18442d08
VS
2210
2211 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2212 &pipe_config->dp_m_n);
2213
2214 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2215 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2216
2d112de7 2217 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2218
c6cd2ee2
JN
2219 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2220 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2221 /*
2222 * This is a big fat ugly hack.
2223 *
2224 * Some machines in UEFI boot mode provide us a VBT that has 18
2225 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2226 * unknown we fail to light up. Yet the same BIOS boots up with
2227 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2228 * max, not what it tells us to use.
2229 *
2230 * Note: This will still be broken if the eDP panel is not lit
2231 * up by the BIOS, and thus we can't get the mode at module
2232 * load.
2233 */
2234 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2235 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2236 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2237 }
045ac3b5
JB
2238}
2239
e8cb4558 2240static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2241{
e8cb4558 2242 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2243 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2244 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2245
6e3c9717 2246 if (crtc->config->has_audio)
495a5bb8 2247 intel_audio_codec_disable(encoder);
6cb49835 2248
b32c6f48
RV
2249 if (HAS_PSR(dev) && !HAS_DDI(dev))
2250 intel_psr_disable(intel_dp);
2251
6cb49835
DV
2252 /* Make sure the panel is off before trying to change the mode. But also
2253 * ensure that we have vdd while we switch off the panel. */
24f3e092 2254 intel_edp_panel_vdd_on(intel_dp);
4be73780 2255 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2256 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2257 intel_edp_panel_off(intel_dp);
3739850b 2258
08aff3fe
VS
2259 /* disable the port before the pipe on g4x */
2260 if (INTEL_INFO(dev)->gen < 5)
3739850b 2261 intel_dp_link_down(intel_dp);
d240f20f
JB
2262}
2263
08aff3fe 2264static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2265{
2bd2ad64 2266 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2267 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2268
49277c31 2269 intel_dp_link_down(intel_dp);
08aff3fe
VS
2270 if (port == PORT_A)
2271 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2272}
2273
2274static void vlv_post_disable_dp(struct intel_encoder *encoder)
2275{
2276 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2277
2278 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2279}
2280
580d3811
VS
2281static void chv_post_disable_dp(struct intel_encoder *encoder)
2282{
2283 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2284 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2285 struct drm_device *dev = encoder->base.dev;
2286 struct drm_i915_private *dev_priv = dev->dev_private;
2287 struct intel_crtc *intel_crtc =
2288 to_intel_crtc(encoder->base.crtc);
2289 enum dpio_channel ch = vlv_dport_to_channel(dport);
2290 enum pipe pipe = intel_crtc->pipe;
2291 u32 val;
2292
2293 intel_dp_link_down(intel_dp);
2294
2295 mutex_lock(&dev_priv->dpio_lock);
2296
2297 /* Propagate soft reset to data lane reset */
97fd4d5c 2298 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2299 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2300 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2301
97fd4d5c
VS
2302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2303 val |= CHV_PCS_REQ_SOFTRESET_EN;
2304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2305
2306 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2307 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2308 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2309
2310 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2311 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2312 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2313
2314 mutex_unlock(&dev_priv->dpio_lock);
2315}
2316
7b13b58a
VS
2317static void
2318_intel_dp_set_link_train(struct intel_dp *intel_dp,
2319 uint32_t *DP,
2320 uint8_t dp_train_pat)
2321{
2322 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2323 struct drm_device *dev = intel_dig_port->base.base.dev;
2324 struct drm_i915_private *dev_priv = dev->dev_private;
2325 enum port port = intel_dig_port->port;
2326
2327 if (HAS_DDI(dev)) {
2328 uint32_t temp = I915_READ(DP_TP_CTL(port));
2329
2330 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2331 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2332 else
2333 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2334
2335 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2336 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2337 case DP_TRAINING_PATTERN_DISABLE:
2338 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2339
2340 break;
2341 case DP_TRAINING_PATTERN_1:
2342 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2343 break;
2344 case DP_TRAINING_PATTERN_2:
2345 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2346 break;
2347 case DP_TRAINING_PATTERN_3:
2348 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2349 break;
2350 }
2351 I915_WRITE(DP_TP_CTL(port), temp);
2352
2353 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2354 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2355
2356 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2357 case DP_TRAINING_PATTERN_DISABLE:
2358 *DP |= DP_LINK_TRAIN_OFF_CPT;
2359 break;
2360 case DP_TRAINING_PATTERN_1:
2361 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2362 break;
2363 case DP_TRAINING_PATTERN_2:
2364 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2365 break;
2366 case DP_TRAINING_PATTERN_3:
2367 DRM_ERROR("DP training pattern 3 not supported\n");
2368 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2369 break;
2370 }
2371
2372 } else {
2373 if (IS_CHERRYVIEW(dev))
2374 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2375 else
2376 *DP &= ~DP_LINK_TRAIN_MASK;
2377
2378 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2379 case DP_TRAINING_PATTERN_DISABLE:
2380 *DP |= DP_LINK_TRAIN_OFF;
2381 break;
2382 case DP_TRAINING_PATTERN_1:
2383 *DP |= DP_LINK_TRAIN_PAT_1;
2384 break;
2385 case DP_TRAINING_PATTERN_2:
2386 *DP |= DP_LINK_TRAIN_PAT_2;
2387 break;
2388 case DP_TRAINING_PATTERN_3:
2389 if (IS_CHERRYVIEW(dev)) {
2390 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2391 } else {
2392 DRM_ERROR("DP training pattern 3 not supported\n");
2393 *DP |= DP_LINK_TRAIN_PAT_2;
2394 }
2395 break;
2396 }
2397 }
2398}
2399
2400static void intel_dp_enable_port(struct intel_dp *intel_dp)
2401{
2402 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2403 struct drm_i915_private *dev_priv = dev->dev_private;
2404
7b13b58a
VS
2405 /* enable with pattern 1 (as per spec) */
2406 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2407 DP_TRAINING_PATTERN_1);
2408
2409 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2410 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2411
2412 /*
2413 * Magic for VLV/CHV. We _must_ first set up the register
2414 * without actually enabling the port, and then do another
2415 * write to enable the port. Otherwise link training will
2416 * fail when the power sequencer is freshly used for this port.
2417 */
2418 intel_dp->DP |= DP_PORT_EN;
2419
2420 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2421 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2422}
2423
e8cb4558 2424static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2425{
e8cb4558
DV
2426 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2427 struct drm_device *dev = encoder->base.dev;
2428 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2429 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2430 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2431
0c33d8d7
DV
2432 if (WARN_ON(dp_reg & DP_PORT_EN))
2433 return;
5d613501 2434
093e3f13
VS
2435 pps_lock(intel_dp);
2436
2437 if (IS_VALLEYVIEW(dev))
2438 vlv_init_panel_power_sequencer(intel_dp);
2439
7b13b58a 2440 intel_dp_enable_port(intel_dp);
093e3f13
VS
2441
2442 edp_panel_vdd_on(intel_dp);
2443 edp_panel_on(intel_dp);
2444 edp_panel_vdd_off(intel_dp, true);
2445
2446 pps_unlock(intel_dp);
2447
61234fa5
VS
2448 if (IS_VALLEYVIEW(dev))
2449 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2450
f01eca2e 2451 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2452 intel_dp_start_link_train(intel_dp);
33a34e4e 2453 intel_dp_complete_link_train(intel_dp);
3ab9c637 2454 intel_dp_stop_link_train(intel_dp);
c1dec79a 2455
6e3c9717 2456 if (crtc->config->has_audio) {
c1dec79a
JN
2457 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2458 pipe_name(crtc->pipe));
2459 intel_audio_codec_enable(encoder);
2460 }
ab1f90f9 2461}
89b667f8 2462
ecff4f3b
JN
2463static void g4x_enable_dp(struct intel_encoder *encoder)
2464{
828f5c6e
JN
2465 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2466
ecff4f3b 2467 intel_enable_dp(encoder);
4be73780 2468 intel_edp_backlight_on(intel_dp);
ab1f90f9 2469}
89b667f8 2470
ab1f90f9
JN
2471static void vlv_enable_dp(struct intel_encoder *encoder)
2472{
828f5c6e
JN
2473 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2474
4be73780 2475 intel_edp_backlight_on(intel_dp);
b32c6f48 2476 intel_psr_enable(intel_dp);
d240f20f
JB
2477}
2478
ecff4f3b 2479static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2480{
2481 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2482 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2483
8ac33ed3
DV
2484 intel_dp_prepare(encoder);
2485
d41f1efb
DV
2486 /* Only ilk+ has port A */
2487 if (dport->port == PORT_A) {
2488 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2489 ironlake_edp_pll_on(intel_dp);
d41f1efb 2490 }
ab1f90f9
JN
2491}
2492
83b84597
VS
2493static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2494{
2495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2496 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2497 enum pipe pipe = intel_dp->pps_pipe;
2498 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2499
2500 edp_panel_vdd_off_sync(intel_dp);
2501
2502 /*
 2503 * VLV seems to get confused when multiple power sequencers
 2504 * have the same port selected (even if only one has power/vdd
 2505 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2506 * CHV, on the other hand, doesn't seem to mind having the same port
 2507 * selected in multiple power sequencers, but let's always clear the
 2508 * port select when logically disconnecting a power sequencer
2509 * from a port.
2510 */
2511 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2512 pipe_name(pipe), port_name(intel_dig_port->port));
2513 I915_WRITE(pp_on_reg, 0);
2514 POSTING_READ(pp_on_reg);
2515
2516 intel_dp->pps_pipe = INVALID_PIPE;
2517}
2518
a4a5d2f8
VS
2519static void vlv_steal_power_sequencer(struct drm_device *dev,
2520 enum pipe pipe)
2521{
2522 struct drm_i915_private *dev_priv = dev->dev_private;
2523 struct intel_encoder *encoder;
2524
2525 lockdep_assert_held(&dev_priv->pps_mutex);
2526
ac3c12e4
VS
2527 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2528 return;
2529
a4a5d2f8
VS
2530 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2531 base.head) {
2532 struct intel_dp *intel_dp;
773538e8 2533 enum port port;
a4a5d2f8
VS
2534
2535 if (encoder->type != INTEL_OUTPUT_EDP)
2536 continue;
2537
2538 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2539 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2540
2541 if (intel_dp->pps_pipe != pipe)
2542 continue;
2543
2544 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2545 pipe_name(pipe), port_name(port));
a4a5d2f8 2546
034e43c6
VS
2547 WARN(encoder->connectors_active,
2548 "stealing pipe %c power sequencer from active eDP port %c\n",
2549 pipe_name(pipe), port_name(port));
a4a5d2f8 2550
a4a5d2f8 2551 /* make sure vdd is off before we steal it */
83b84597 2552 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2553 }
2554}
2555
2556static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2557{
2558 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2559 struct intel_encoder *encoder = &intel_dig_port->base;
2560 struct drm_device *dev = encoder->base.dev;
2561 struct drm_i915_private *dev_priv = dev->dev_private;
2562 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2563
2564 lockdep_assert_held(&dev_priv->pps_mutex);
2565
093e3f13
VS
2566 if (!is_edp(intel_dp))
2567 return;
2568
a4a5d2f8
VS
2569 if (intel_dp->pps_pipe == crtc->pipe)
2570 return;
2571
2572 /*
2573 * If another power sequencer was being used on this
 2574 * port previously, make sure to turn off vdd there while
2575 * we still have control of it.
2576 */
2577 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2578 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2579
2580 /*
2581 * We may be stealing the power
2582 * sequencer from another port.
2583 */
2584 vlv_steal_power_sequencer(dev, crtc->pipe);
2585
2586 /* now it's all ours */
2587 intel_dp->pps_pipe = crtc->pipe;
2588
2589 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2590 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2591
2592 /* init power sequencer on this pipe and port */
36b5f425
VS
2593 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2594 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2595}
2596
ab1f90f9 2597static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2598{
2bd2ad64 2599 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2600 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2601 struct drm_device *dev = encoder->base.dev;
89b667f8 2602 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2603 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2604 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2605 int pipe = intel_crtc->pipe;
2606 u32 val;
a4fc5ed6 2607
ab1f90f9 2608 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2609
ab3c759a 2610 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2611 val = 0;
2612 if (pipe)
2613 val |= (1<<21);
2614 else
2615 val &= ~(1<<21);
2616 val |= 0x001000c4;
ab3c759a
CML
2617 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2618 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2619 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2620
ab1f90f9
JN
2621 mutex_unlock(&dev_priv->dpio_lock);
2622
2623 intel_enable_dp(encoder);
89b667f8
JB
2624}
2625
ecff4f3b 2626static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2627{
2628 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2629 struct drm_device *dev = encoder->base.dev;
2630 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2631 struct intel_crtc *intel_crtc =
2632 to_intel_crtc(encoder->base.crtc);
e4607fcf 2633 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2634 int pipe = intel_crtc->pipe;
89b667f8 2635
8ac33ed3
DV
2636 intel_dp_prepare(encoder);
2637
89b667f8 2638 /* Program Tx lane resets to default */
0980a60f 2639 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2640 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2641 DPIO_PCS_TX_LANE2_RESET |
2642 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2643 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2644 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2645 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2646 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2647 DPIO_PCS_CLK_SOFT_RESET);
2648
2649 /* Fix up inter-pair skew failure */
ab3c759a
CML
2650 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2651 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2652 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2653 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2654}
2655
e4a1d846
CML
2656static void chv_pre_enable_dp(struct intel_encoder *encoder)
2657{
2658 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2659 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2660 struct drm_device *dev = encoder->base.dev;
2661 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2662 struct intel_crtc *intel_crtc =
2663 to_intel_crtc(encoder->base.crtc);
2664 enum dpio_channel ch = vlv_dport_to_channel(dport);
2665 int pipe = intel_crtc->pipe;
2666 int data, i;
949c1d43 2667 u32 val;
e4a1d846 2668
e4a1d846 2669 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2670
570e2a74
VS
2671 /* allow hardware to manage TX FIFO reset source */
2672 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2673 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2674 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2675
2676 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2677 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2678 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2679
949c1d43 2680 /* Deassert soft data lane reset */
97fd4d5c 2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2682 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2684
2685 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2686 val |= CHV_PCS_REQ_SOFTRESET_EN;
2687 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2688
2689 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2690 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2691 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2692
97fd4d5c 2693 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2694 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2695 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2696
 2697 /* Program Tx lane latency optimal setting */
e4a1d846
CML
2698 for (i = 0; i < 4; i++) {
2699 /* Set the latency optimal bit */
2700 data = (i == 1) ? 0x0 : 0x6;
2701 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2702 data << DPIO_FRC_LATENCY_SHFIT);
2703
2704 /* Set the upar bit */
2705 data = (i == 1) ? 0x0 : 0x1;
2706 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2707 data << DPIO_UPAR_SHIFT);
2708 }
2709
2710 /* Data lane stagger programming */
2711 /* FIXME: Fix up value only after power analysis */
2712
2713 mutex_unlock(&dev_priv->dpio_lock);
2714
e4a1d846 2715 intel_enable_dp(encoder);
e4a1d846
CML
2716}
2717
9197c88b
VS
2718static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2719{
2720 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2721 struct drm_device *dev = encoder->base.dev;
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2723 struct intel_crtc *intel_crtc =
2724 to_intel_crtc(encoder->base.crtc);
2725 enum dpio_channel ch = vlv_dport_to_channel(dport);
2726 enum pipe pipe = intel_crtc->pipe;
2727 u32 val;
2728
625695f8
VS
2729 intel_dp_prepare(encoder);
2730
9197c88b
VS
2731 mutex_lock(&dev_priv->dpio_lock);
2732
b9e5ac3c
VS
2733 /* program left/right clock distribution */
2734 if (pipe != PIPE_B) {
2735 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2736 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2737 if (ch == DPIO_CH0)
2738 val |= CHV_BUFLEFTENA1_FORCE;
2739 if (ch == DPIO_CH1)
2740 val |= CHV_BUFRIGHTENA1_FORCE;
2741 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2742 } else {
2743 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2744 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2745 if (ch == DPIO_CH0)
2746 val |= CHV_BUFLEFTENA2_FORCE;
2747 if (ch == DPIO_CH1)
2748 val |= CHV_BUFRIGHTENA2_FORCE;
2749 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2750 }
2751
9197c88b
VS
2752 /* program clock channel usage */
2753 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2754 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2755 if (pipe != PIPE_B)
2756 val &= ~CHV_PCS_USEDCLKCHANNEL;
2757 else
2758 val |= CHV_PCS_USEDCLKCHANNEL;
2759 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2760
2761 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2762 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2763 if (pipe != PIPE_B)
2764 val &= ~CHV_PCS_USEDCLKCHANNEL;
2765 else
2766 val |= CHV_PCS_USEDCLKCHANNEL;
2767 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2768
2769 /*
 2770 * This is a bit weird since generally CL
2771 * matches the pipe, but here we need to
2772 * pick the CL based on the port.
2773 */
2774 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2775 if (pipe != PIPE_B)
2776 val &= ~CHV_CMN_USEDCLKCHANNEL;
2777 else
2778 val |= CHV_CMN_USEDCLKCHANNEL;
2779 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2780
2781 mutex_unlock(&dev_priv->dpio_lock);
2782}
2783
a4fc5ed6 2784/*
df0c237d
JB
2785 * Native read with retry for link status and receiver capability reads for
2786 * cases where the sink may still be asleep.
9d1a1031
JN
2787 *
2788 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2789 * supposed to retry 3 times per the spec.
a4fc5ed6 2790 */
9d1a1031
JN
2791static ssize_t
2792intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2793 void *buffer, size_t size)
a4fc5ed6 2794{
9d1a1031
JN
2795 ssize_t ret;
2796 int i;
61da5fab 2797
f6a19066
VS
2798 /*
 2799 * Sometimes we just get the same incorrect byte repeated
 2800 * over the entire buffer. Doing just one throw-away read
2801 * initially seems to "solve" it.
2802 */
2803 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2804
61da5fab 2805 for (i = 0; i < 3; i++) {
9d1a1031
JN
2806 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2807 if (ret == size)
2808 return ret;
61da5fab
JB
2809 msleep(1);
2810 }
a4fc5ed6 2811
9d1a1031 2812 return ret;
a4fc5ed6
KP
2813}
2814
2815/*
2816 * Fetch AUX CH registers 0x202 - 0x207 which contain
2817 * link status information
2818 */
2819static bool
93f62dad 2820intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2821{
9d1a1031
JN
2822 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2823 DP_LANE0_1_STATUS,
2824 link_status,
2825 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2826}
2827
1100244e 2828/* These are source-specific values. */
a4fc5ed6 2829static uint8_t
1a2eb460 2830intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2831{
30add22d 2832 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2833 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2834 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2835
7ad14a29
SJ
2836 if (INTEL_INFO(dev)->gen >= 9) {
2837 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2838 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2839 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2840 } else if (IS_VALLEYVIEW(dev))
bd60018a 2841 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2842 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2843 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2844 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2845 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2846 else
bd60018a 2847 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2848}
2849
2850static uint8_t
2851intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2852{
30add22d 2853 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2854 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2855
5a9d1f1a
DL
2856 if (INTEL_INFO(dev)->gen >= 9) {
2857 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2859 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2861 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2863 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2865 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2866 default:
2867 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2868 }
2869 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2870 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2872 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2873 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2874 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2876 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2878 default:
bd60018a 2879 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2880 }
e2fa6fba
P
2881 } else if (IS_VALLEYVIEW(dev)) {
2882 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2884 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2885 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2886 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2887 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2888 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2890 default:
bd60018a 2891 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2892 }
bc7d38a4 2893 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2894 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2896 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2897 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2898 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2899 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2900 default:
bd60018a 2901 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2902 }
2903 } else {
2904 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2907 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2908 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2909 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2910 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2911 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2912 default:
bd60018a 2913 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2914 }
a4fc5ed6
KP
2915 }
2916}
2917
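 /* Translate the chosen voltage swing / pre-emphasis levels into VLV DPIO PHY register programming. */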
e2fa6fba
P
2918static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2919{
2920 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2921 struct drm_i915_private *dev_priv = dev->dev_private;
2922 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2923 struct intel_crtc *intel_crtc =
2924 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2925 unsigned long demph_reg_value, preemph_reg_value,
2926 uniqtranscale_reg_value;
2927 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2928 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2929 int pipe = intel_crtc->pipe;
e2fa6fba
P
2930
2931 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2932 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2933 preemph_reg_value = 0x0004000;
2934 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2936 demph_reg_value = 0x2B405555;
2937 uniqtranscale_reg_value = 0x552AB83A;
2938 break;
bd60018a 2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2940 demph_reg_value = 0x2B404040;
2941 uniqtranscale_reg_value = 0x5548B83A;
2942 break;
bd60018a 2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2944 demph_reg_value = 0x2B245555;
2945 uniqtranscale_reg_value = 0x5560B83A;
2946 break;
bd60018a 2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2948 demph_reg_value = 0x2B405555;
2949 uniqtranscale_reg_value = 0x5598DA3A;
2950 break;
2951 default:
2952 return 0;
2953 }
2954 break;
bd60018a 2955 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2956 preemph_reg_value = 0x0002000;
2957 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2958 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2959 demph_reg_value = 0x2B404040;
2960 uniqtranscale_reg_value = 0x5552B83A;
2961 break;
bd60018a 2962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2963 demph_reg_value = 0x2B404848;
2964 uniqtranscale_reg_value = 0x5580B83A;
2965 break;
bd60018a 2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2967 demph_reg_value = 0x2B404040;
2968 uniqtranscale_reg_value = 0x55ADDA3A;
2969 break;
2970 default:
2971 return 0;
2972 }
2973 break;
bd60018a 2974 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2975 preemph_reg_value = 0x0000000;
2976 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2978 demph_reg_value = 0x2B305555;
2979 uniqtranscale_reg_value = 0x5570B83A;
2980 break;
bd60018a 2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2982 demph_reg_value = 0x2B2B4040;
2983 uniqtranscale_reg_value = 0x55ADDA3A;
2984 break;
2985 default:
2986 return 0;
2987 }
2988 break;
bd60018a 2989 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2990 preemph_reg_value = 0x0006000;
2991 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2993 demph_reg_value = 0x1B405555;
2994 uniqtranscale_reg_value = 0x55ADDA3A;
2995 break;
2996 default:
2997 return 0;
2998 }
2999 break;
3000 default:
3001 return 0;
3002 }
3003
0980a60f 3004 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3005 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3006 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3007 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3008 uniqtranscale_reg_value);
ab3c759a
CML
3009 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3010 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3011 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3012 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3013 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3014
3015 return 0;
3016}
3017
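 /* Translate the chosen voltage swing / pre-emphasis levels into CHV DPIO PHY deemphasis and margin programming. */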
e4a1d846
CML
3018static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3019{
3020 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3021 struct drm_i915_private *dev_priv = dev->dev_private;
3022 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3023 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3024 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3025 uint8_t train_set = intel_dp->train_set[0];
3026 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3027 enum pipe pipe = intel_crtc->pipe;
3028 int i;
e4a1d846
CML
3029
3030 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3031 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3034 deemph_reg_value = 128;
3035 margin_reg_value = 52;
3036 break;
bd60018a 3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3038 deemph_reg_value = 128;
3039 margin_reg_value = 77;
3040 break;
bd60018a 3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3042 deemph_reg_value = 128;
3043 margin_reg_value = 102;
3044 break;
bd60018a 3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3046 deemph_reg_value = 128;
3047 margin_reg_value = 154;
3048 /* FIXME extra to set for 1200 */
3049 break;
3050 default:
3051 return 0;
3052 }
3053 break;
bd60018a 3054 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3055 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3057 deemph_reg_value = 85;
3058 margin_reg_value = 78;
3059 break;
bd60018a 3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3061 deemph_reg_value = 85;
3062 margin_reg_value = 116;
3063 break;
bd60018a 3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3065 deemph_reg_value = 85;
3066 margin_reg_value = 154;
3067 break;
3068 default:
3069 return 0;
3070 }
3071 break;
bd60018a 3072 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3075 deemph_reg_value = 64;
3076 margin_reg_value = 104;
3077 break;
bd60018a 3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3079 deemph_reg_value = 64;
3080 margin_reg_value = 154;
3081 break;
3082 default:
3083 return 0;
3084 }
3085 break;
bd60018a 3086 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3087 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3089 deemph_reg_value = 43;
3090 margin_reg_value = 154;
3091 break;
3092 default:
3093 return 0;
3094 }
3095 break;
3096 default:
3097 return 0;
3098 }
3099
3100 mutex_lock(&dev_priv->dpio_lock);
3101
3102 /* Clear calc init */
1966e59e
VS
3103 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3104 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3105 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3106 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3107 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3108
3109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3110 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3111 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3112 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3113 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3114
a02ef3c7
VS
3115 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3116 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3117 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3118 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3119
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3121 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3122 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3123 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3124
e4a1d846 3125 /* Program swing deemph */
f72df8db
VS
3126 for (i = 0; i < 4; i++) {
3127 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3128 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3129 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3130 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3131 }
e4a1d846
CML
3132
3133 /* Program swing margin */
f72df8db
VS
3134 for (i = 0; i < 4; i++) {
3135 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3136 val &= ~DPIO_SWING_MARGIN000_MASK;
3137 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3138 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3139 }
e4a1d846
CML
3140
3141 /* Disable unique transition scale */
f72df8db
VS
3142 for (i = 0; i < 4; i++) {
3143 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3144 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3145 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3146 }
e4a1d846
CML
3147
3148 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3149 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3150 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3151 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3152
3153 /*
3154 * The document said it needs to set bit 27 for ch0 and bit 26
3155 * for ch1. Might be a typo in the doc.
3156 * For now, for this unique transition scale selection, set bit
3157 * 27 for ch0 and ch1.
3158 */
f72df8db
VS
3159 for (i = 0; i < 4; i++) {
3160 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3161 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3162 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3163 }
e4a1d846 3164
f72df8db
VS
3165 for (i = 0; i < 4; i++) {
3166 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3167 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3168 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3169 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3170 }
e4a1d846
CML
3171 }
3172
3173 /* Start swing calculation */
1966e59e
VS
3174 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3175 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3176 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3177
3178 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3179 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3180 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3181
3182 /* LRC Bypass */
3183 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3184 val |= DPIO_LRC_BYPASS;
3185 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3186
3187 mutex_unlock(&dev_priv->dpio_lock);
3188
3189 return 0;
3190}
3191
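 /* Pick the highest voltage swing and pre-emphasis requested on any lane, clamped to the source's supported maximums. */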
a4fc5ed6 3192static void
0301b3ac
JN
3193intel_get_adjust_train(struct intel_dp *intel_dp,
3194 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3195{
3196 uint8_t v = 0;
3197 uint8_t p = 0;
3198 int lane;
1a2eb460
KP
3199 uint8_t voltage_max;
3200 uint8_t preemph_max;
a4fc5ed6 3201
33a34e4e 3202 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3203 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3204 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3205
3206 if (this_v > v)
3207 v = this_v;
3208 if (this_p > p)
3209 p = this_p;
3210 }
3211
1a2eb460 3212 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3213 if (v >= voltage_max)
3214 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3215
1a2eb460
KP
3216 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3217 if (p >= preemph_max)
3218 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3219
3220 for (lane = 0; lane < 4; lane++)
33a34e4e 3221 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3222}
3223
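/* Gen4's DP voltage swing and pre-emphasis control */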
3224static uint32_t
f0a3424e 3225intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3226{
3cf2efb1 3227 uint32_t signal_levels = 0;
a4fc5ed6 3228
3cf2efb1 3229 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3231 default:
3232 signal_levels |= DP_VOLTAGE_0_4;
3233 break;
bd60018a 3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3235 signal_levels |= DP_VOLTAGE_0_6;
3236 break;
bd60018a 3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3238 signal_levels |= DP_VOLTAGE_0_8;
3239 break;
bd60018a 3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3241 signal_levels |= DP_VOLTAGE_1_2;
3242 break;
3243 }
3cf2efb1 3244 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3245 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3246 default:
3247 signal_levels |= DP_PRE_EMPHASIS_0;
3248 break;
bd60018a 3249 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3250 signal_levels |= DP_PRE_EMPHASIS_3_5;
3251 break;
bd60018a 3252 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3253 signal_levels |= DP_PRE_EMPHASIS_6;
3254 break;
bd60018a 3255 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3256 signal_levels |= DP_PRE_EMPHASIS_9_5;
3257 break;
3258 }
3259 return signal_levels;
3260}
3261
e3421a18
ZW
3262/* Gen6's DP voltage swing and pre-emphasis control */
3263static uint32_t
3264intel_gen6_edp_signal_levels(uint8_t train_set)
3265{
3c5a62b5
YL
3266 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3267 DP_TRAIN_PRE_EMPHASIS_MASK);
3268 switch (signal_levels) {
bd60018a
SJ
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3271 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3273 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3276 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3279 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3282 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3283 default:
3c5a62b5
YL
 3284 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3285 "0x%x\n", signal_levels);
3286 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3287 }
3288}
3289
1a2eb460
KP
3290/* Gen7's DP voltage swing and pre-emphasis control */
3291static uint32_t
3292intel_gen7_edp_signal_levels(uint8_t train_set)
3293{
3294 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3295 DP_TRAIN_PRE_EMPHASIS_MASK);
3296 switch (signal_levels) {
bd60018a 3297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3298 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3300 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3302 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3303
bd60018a 3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3305 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3307 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3308
bd60018a 3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3310 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3312 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3313
3314 default:
 3315 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3316 "0x%x\n", signal_levels);
3317 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3318 }
3319}
3320
d6c0d722
PZ
3321/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3322static uint32_t
f0a3424e 3323intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3324{
d6c0d722
PZ
3325 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3326 DP_TRAIN_PRE_EMPHASIS_MASK);
3327 switch (signal_levels) {
bd60018a 3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3329 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3331 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3333 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3335 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3336
bd60018a 3337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3338 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3340 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3342 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3343
bd60018a 3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3345 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3347 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3348
3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3350 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3351 default:
 3352 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3353 "0x%x\n", signal_levels);
c5fe6a06 3354 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3355 }
a4fc5ed6
KP
3356}
3357
f0a3424e
PZ
3358/* Properly updates "DP" with the correct signal levels. */
3359static void
3360intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3361{
3362 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3363 enum port port = intel_dig_port->port;
f0a3424e
PZ
3364 struct drm_device *dev = intel_dig_port->base.base.dev;
3365 uint32_t signal_levels, mask;
3366 uint8_t train_set = intel_dp->train_set[0];
3367
5a9d1f1a 3368 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3369 signal_levels = intel_hsw_signal_levels(train_set);
3370 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3371 } else if (IS_CHERRYVIEW(dev)) {
3372 signal_levels = intel_chv_signal_levels(intel_dp);
3373 mask = 0;
e2fa6fba
P
3374 } else if (IS_VALLEYVIEW(dev)) {
3375 signal_levels = intel_vlv_signal_levels(intel_dp);
3376 mask = 0;
bc7d38a4 3377 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3378 signal_levels = intel_gen7_edp_signal_levels(train_set);
3379 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3380 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3381 signal_levels = intel_gen6_edp_signal_levels(train_set);
3382 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3383 } else {
3384 signal_levels = intel_gen4_signal_levels(train_set);
3385 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3386 }
3387
3388 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3389
3390 *DP = (*DP & ~mask) | signal_levels;
3391}
3392
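/*
 * Write the requested training pattern to the port register and mirror it,
 * together with the per-lane drive settings, to the sink's
 * DP_TRAINING_PATTERN_SET / DP_TRAINING_LANEx_SET registers over AUX.
 */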
a4fc5ed6 3393static bool
ea5b213a 3394intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3395 uint32_t *DP,
58e10eb9 3396 uint8_t dp_train_pat)
a4fc5ed6 3397{
174edf1f
PZ
3398 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3399 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3400 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3401 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3402 int ret, len;
a4fc5ed6 3403
7b13b58a 3404 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3405
70aff66c 3406 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3407 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3408
2cdfe6c8
JN
3409 buf[0] = dp_train_pat;
3410 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3411 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3412 /* don't write DP_TRAINING_LANEx_SET on disable */
3413 len = 1;
3414 } else {
 3415 /* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
3416 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3417 len = intel_dp->lane_count + 1;
47ea7542 3418 }
a4fc5ed6 3419
9d1a1031
JN
3420 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3421 buf, len);
2cdfe6c8
JN
3422
3423 return ret == len;
a4fc5ed6
KP
3424}
3425
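/*
 * reset_link_train() starts over from zeroed drive settings, while
 * update_link_train() applies the sink's latest adjust requests; both
 * reprogram the port register and push the new lane settings to the sink.
 */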
70aff66c
JN
3426static bool
3427intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3428 uint8_t dp_train_pat)
3429{
953d22e8 3430 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3431 intel_dp_set_signal_levels(intel_dp, DP);
3432 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3433}
3434
3435static bool
3436intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3437 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3438{
3439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3440 struct drm_device *dev = intel_dig_port->base.base.dev;
3441 struct drm_i915_private *dev_priv = dev->dev_private;
3442 int ret;
3443
3444 intel_get_adjust_train(intel_dp, link_status);
3445 intel_dp_set_signal_levels(intel_dp, DP);
3446
3447 I915_WRITE(intel_dp->output_reg, *DP);
3448 POSTING_READ(intel_dp->output_reg);
3449
9d1a1031
JN
3450 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3451 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3452
3453 return ret == intel_dp->lane_count;
3454}
3455
3ab9c637
ID
3456static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3457{
3458 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3459 struct drm_device *dev = intel_dig_port->base.base.dev;
3460 struct drm_i915_private *dev_priv = dev->dev_private;
3461 enum port port = intel_dig_port->port;
3462 uint32_t val;
3463
3464 if (!HAS_DDI(dev))
3465 return;
3466
3467 val = I915_READ(DP_TP_CTL(port));
3468 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3469 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3470 I915_WRITE(DP_TP_CTL(port), val);
3471
3472 /*
3473 * On PORT_A we can have only eDP in SST mode. There the only reason
3474 * we need to set idle transmission mode is to work around a HW issue
3475 * where we enable the pipe while not in idle link-training mode.
 3476 * In this case there is a requirement to wait for a minimum number of
3477 * idle patterns to be sent.
3478 */
3479 if (port == PORT_A)
3480 return;
3481
3482 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3483 1))
3484 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3485}
3486
33a34e4e 3487/* Enable corresponding port and start training pattern 1 */
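/*
 * Clock recovery phase: transmit training pattern 1 and keep applying the
 * drive settings requested by the sink until every lane reports CR done.
 * Retries at the same voltage level and full restarts are each capped at
 * five attempts before training is abandoned.
 */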
c19b0669 3488void
33a34e4e 3489intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3490{
da63a9f2 3491 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3492 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3493 int i;
3494 uint8_t voltage;
cdb0e95b 3495 int voltage_tries, loop_tries;
ea5b213a 3496 uint32_t DP = intel_dp->DP;
6aba5b6c 3497 uint8_t link_config[2];
a4fc5ed6 3498
affa9354 3499 if (HAS_DDI(dev))
c19b0669
PZ
3500 intel_ddi_prepare_link_retrain(encoder);
3501
3cf2efb1 3502 /* Write the link configuration data */
6aba5b6c
JN
3503 link_config[0] = intel_dp->link_bw;
3504 link_config[1] = intel_dp->lane_count;
3505 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3506 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3507 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
bc27b7d3 3508 if (intel_dp->num_supported_rates)
a8f3ef61
SJ
3509 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3510 &intel_dp->rate_select, 1);
6aba5b6c
JN
3511
3512 link_config[0] = 0;
3513 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3514 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3515
3516 DP |= DP_PORT_EN;
1a2eb460 3517
70aff66c
JN
3518 /* clock recovery */
3519 if (!intel_dp_reset_link_train(intel_dp, &DP,
3520 DP_TRAINING_PATTERN_1 |
3521 DP_LINK_SCRAMBLING_DISABLE)) {
3522 DRM_ERROR("failed to enable link training\n");
3523 return;
3524 }
3525
a4fc5ed6 3526 voltage = 0xff;
cdb0e95b
KP
3527 voltage_tries = 0;
3528 loop_tries = 0;
a4fc5ed6 3529 for (;;) {
70aff66c 3530 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3531
a7c9655f 3532 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3533 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3534 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3535 break;
93f62dad 3536 }
a4fc5ed6 3537
01916270 3538 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3539 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3540 break;
3541 }
3542
3543 /* Check to see if we've tried the max voltage */
3544 for (i = 0; i < intel_dp->lane_count; i++)
3545 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3546 break;
3b4f819d 3547 if (i == intel_dp->lane_count) {
b06fbda3
DV
3548 ++loop_tries;
3549 if (loop_tries == 5) {
3def84b3 3550 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3551 break;
3552 }
70aff66c
JN
3553 intel_dp_reset_link_train(intel_dp, &DP,
3554 DP_TRAINING_PATTERN_1 |
3555 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3556 voltage_tries = 0;
3557 continue;
3558 }
a4fc5ed6 3559
3cf2efb1 3560 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3561 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3562 ++voltage_tries;
b06fbda3 3563 if (voltage_tries == 5) {
3def84b3 3564 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3565 break;
3566 }
3567 } else
3568 voltage_tries = 0;
3569 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3570
70aff66c
JN
3571 /* Update training set as requested by target */
3572 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3573 DRM_ERROR("failed to update link training\n");
3574 break;
3575 }
a4fc5ed6
KP
3576 }
3577
33a34e4e
JB
3578 intel_dp->DP = DP;
3579}
3580
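/*
 * Channel equalization phase: transmit training pattern 2 (or 3 for
 * HBR2 / TPS3-capable sinks) until all lanes report EQ done. Losing clock
 * recovery, or repeatedly failing EQ, restarts clock recovery; too many
 * such restarts abort the training.
 */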
c19b0669 3581void
33a34e4e
JB
3582intel_dp_complete_link_train(struct intel_dp *intel_dp)
3583{
33a34e4e 3584 bool channel_eq = false;
37f80975 3585 int tries, cr_tries;
33a34e4e 3586 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3587 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3588
 3589 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3590 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3591 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3592
a4fc5ed6 3593 /* channel equalization */
70aff66c 3594 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3595 training_pattern |
70aff66c
JN
3596 DP_LINK_SCRAMBLING_DISABLE)) {
3597 DRM_ERROR("failed to start channel equalization\n");
3598 return;
3599 }
3600
a4fc5ed6 3601 tries = 0;
37f80975 3602 cr_tries = 0;
a4fc5ed6
KP
3603 channel_eq = false;
3604 for (;;) {
70aff66c 3605 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3606
37f80975
JB
3607 if (cr_tries > 5) {
3608 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3609 break;
3610 }
3611
a7c9655f 3612 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3613 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3614 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3615 break;
70aff66c 3616 }
a4fc5ed6 3617
37f80975 3618 /* Make sure clock is still ok */
01916270 3619 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3620 intel_dp_start_link_train(intel_dp);
70aff66c 3621 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3622 training_pattern |
70aff66c 3623 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3624 cr_tries++;
3625 continue;
3626 }
3627
1ffdff13 3628 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3629 channel_eq = true;
3630 break;
3631 }
a4fc5ed6 3632
37f80975
JB
3633 /* Try 5 times, then try clock recovery if that fails */
3634 if (tries > 5) {
37f80975 3635 intel_dp_start_link_train(intel_dp);
70aff66c 3636 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3637 training_pattern |
70aff66c 3638 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3639 tries = 0;
3640 cr_tries++;
3641 continue;
3642 }
a4fc5ed6 3643
70aff66c
JN
3644 /* Update training set as requested by target */
3645 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3646 DRM_ERROR("failed to update link training\n");
3647 break;
3648 }
3cf2efb1 3649 ++tries;
869184a6 3650 }
3cf2efb1 3651
3ab9c637
ID
3652 intel_dp_set_idle_link_train(intel_dp);
3653
3654 intel_dp->DP = DP;
3655
d6c0d722 3656 if (channel_eq)
07f42258 3657 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3658
3ab9c637
ID
3659}
3660
3661void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3662{
70aff66c 3663 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3664 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3665}
3666
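/*
 * Shut the port down: switch the link to the idle training pattern, apply
 * the IBX transcoder select workaround where needed, then clear
 * DP_PORT_EN and wait out the panel power-down delay.
 */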
3667static void
ea5b213a 3668intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3669{
da63a9f2 3670 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3671 enum port port = intel_dig_port->port;
da63a9f2 3672 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3673 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3674 uint32_t DP = intel_dp->DP;
a4fc5ed6 3675
bc76e320 3676 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3677 return;
3678
0c33d8d7 3679 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3680 return;
3681
28c97730 3682 DRM_DEBUG_KMS("\n");
32f9d658 3683
bc7d38a4 3684 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3685 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3686 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3687 } else {
aad3d14d
VS
3688 if (IS_CHERRYVIEW(dev))
3689 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3690 else
3691 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3692 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3693 }
fe255d00 3694 POSTING_READ(intel_dp->output_reg);
5eb08b69 3695
493a7081 3696 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3697 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3698 /* Hardware workaround: leaving our transcoder select
3699 * set to transcoder B while it's off will prevent the
3700 * corresponding HDMI output on transcoder A.
3701 *
3702 * Combine this with another hardware workaround:
3703 * transcoder select bit can only be cleared while the
3704 * port is enabled.
3705 */
3706 DP &= ~DP_PIPEB_SELECT;
3707 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3708 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3709 }
3710
832afda6 3711 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3712 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3713 POSTING_READ(intel_dp->output_reg);
f01eca2e 3714 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3715}
3716
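/*
 * Read and cache the sink's DPCD. This also records PSR support, TPS3
 * capability, the eDP 1.4 supported link rate table and, where present,
 * the downstream port info; returns false if the sink does not respond or
 * reports no DPCD.
 */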
26d61aad
KP
3717static bool
3718intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3719{
a031d709
RV
3720 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3721 struct drm_device *dev = dig_port->base.base.dev;
3722 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3723 uint8_t rev;
a031d709 3724
9d1a1031
JN
3725 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3726 sizeof(intel_dp->dpcd)) < 0)
edb39244 3727 return false; /* aux transfer failed */
92fd8fd1 3728
a8e98153 3729 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3730
edb39244
AJ
3731 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3732 return false; /* DPCD not present */
3733
2293bb5c
SK
3734 /* Check if the panel supports PSR */
3735 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3736 if (is_edp(intel_dp)) {
9d1a1031
JN
3737 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3738 intel_dp->psr_dpcd,
3739 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3740 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3741 dev_priv->psr.sink_support = true;
50003939 3742 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3743 }
50003939
JN
3744 }
3745
7809a611 3746 /* Training Pattern 3 support, both source and sink */
06ea66b6 3747 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3748 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3749 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3750 intel_dp->use_tps3 = true;
f8d8a672 3751 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3752 } else
3753 intel_dp->use_tps3 = false;
3754
fc0f8e25
SJ
3755 /* Intermediate frequency support */
3756 if (is_edp(intel_dp) &&
3757 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3758 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 3759 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3760 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3761 int i;
3762
fc0f8e25
SJ
3763 intel_dp_dpcd_read_wake(&intel_dp->aux,
3764 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3765 supported_rates,
3766 sizeof(supported_rates));
3767
3768 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3769 int val = le16_to_cpu(supported_rates[i]);
3770
3771 if (val == 0)
3772 break;
3773
3774 intel_dp->supported_rates[i] = val * 200;
3775 }
3776 intel_dp->num_supported_rates = i;
fc0f8e25 3777 }
edb39244
AJ
3778 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3779 DP_DWN_STRM_PORT_PRESENT))
3780 return true; /* native DP sink */
3781
3782 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3783 return true; /* no per-port downstream info */
3784
9d1a1031
JN
3785 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3786 intel_dp->downstream_ports,
3787 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3788 return false; /* downstream port status fetch failed */
3789
3790 return true;
92fd8fd1
KP
3791}
3792
0d198328
AJ
3793static void
3794intel_dp_probe_oui(struct intel_dp *intel_dp)
3795{
3796 u8 buf[3];
3797
3798 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3799 return;
3800
9d1a1031 3801 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3802 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3803 buf[0], buf[1], buf[2]);
3804
9d1a1031 3805 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3806 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3807 buf[0], buf[1], buf[2]);
3808}
3809
0e32b39c
DA
3810static bool
3811intel_dp_probe_mst(struct intel_dp *intel_dp)
3812{
3813 u8 buf[1];
3814
3815 if (!intel_dp->can_mst)
3816 return false;
3817
3818 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3819 return false;
3820
0e32b39c
DA
3821 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3822 if (buf[0] & DP_MST_CAP) {
3823 DRM_DEBUG_KMS("Sink is MST capable\n");
3824 intel_dp->is_mst = true;
3825 } else {
3826 DRM_DEBUG_KMS("Sink is not MST capable\n");
3827 intel_dp->is_mst = false;
3828 }
3829 }
0e32b39c
DA
3830
3831 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3832 return intel_dp->is_mst;
3833}
3834
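/*
 * Ask the sink to compute a CRC of the received frame: start the test via
 * DP_TEST_SINK, wait up to six vblanks for the test counter to advance,
 * then read the six CRC bytes back over AUX.
 */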
d2e216d0
RV
3835int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3836{
3837 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3838 struct drm_device *dev = intel_dig_port->base.base.dev;
3839 struct intel_crtc *intel_crtc =
3840 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3841 u8 buf;
3842 int test_crc_count;
3843 int attempts = 6;
d2e216d0 3844
ad9dc91b 3845 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3846 return -EIO;
d2e216d0 3847
ad9dc91b 3848 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3849 return -ENOTTY;
3850
1dda5f93
RV
3851 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3852 return -EIO;
3853
9d1a1031 3854 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3855 buf | DP_TEST_SINK_START) < 0)
bda0381e 3856 return -EIO;
d2e216d0 3857
1dda5f93 3858 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3859 return -EIO;
ad9dc91b 3860 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3861
ad9dc91b 3862 do {
1dda5f93
RV
3863 if (drm_dp_dpcd_readb(&intel_dp->aux,
3864 DP_TEST_SINK_MISC, &buf) < 0)
3865 return -EIO;
ad9dc91b
RV
3866 intel_wait_for_vblank(dev, intel_crtc->pipe);
3867 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3868
3869 if (attempts == 0) {
90bd1f46
DV
3870 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3871 return -ETIMEDOUT;
ad9dc91b 3872 }
d2e216d0 3873
9d1a1031 3874 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3875 return -EIO;
d2e216d0 3876
1dda5f93
RV
3877 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3878 return -EIO;
3879 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3880 buf & ~DP_TEST_SINK_START) < 0)
3881 return -EIO;
ce31d9f4 3882
d2e216d0
RV
3883 return 0;
3884}
3885
a60f0e38
JB
3886static bool
3887intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3888{
9d1a1031
JN
3889 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3890 DP_DEVICE_SERVICE_IRQ_VECTOR,
3891 sink_irq_vector, 1) == 1;
a60f0e38
JB
3892}
3893
0e32b39c
DA
3894static bool
3895intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3896{
3897 int ret;
3898
3899 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3900 DP_SINK_COUNT_ESI,
3901 sink_irq_vector, 14);
3902 if (ret != 14)
3903 return false;
3904
3905 return true;
3906}
3907
a60f0e38
JB
3908static void
3909intel_dp_handle_test_request(struct intel_dp *intel_dp)
3910{
3911 /* NAK by default */
9d1a1031 3912 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3913}
3914
0e32b39c
DA
3915static int
3916intel_dp_check_mst_status(struct intel_dp *intel_dp)
3917{
3918 bool bret;
3919
3920 if (intel_dp->is_mst) {
3921 u8 esi[16] = { 0 };
3922 int ret = 0;
3923 int retry;
3924 bool handled;
3925 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3926go_again:
3927 if (bret == true) {
3928
3929 /* check link status - esi[10] = 0x200c */
3930 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3931 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3932 intel_dp_start_link_train(intel_dp);
3933 intel_dp_complete_link_train(intel_dp);
3934 intel_dp_stop_link_train(intel_dp);
3935 }
3936
6f34cc39 3937 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3938 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3939
3940 if (handled) {
3941 for (retry = 0; retry < 3; retry++) {
3942 int wret;
3943 wret = drm_dp_dpcd_write(&intel_dp->aux,
3944 DP_SINK_COUNT_ESI+1,
3945 &esi[1], 3);
3946 if (wret == 3) {
3947 break;
3948 }
3949 }
3950
3951 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3952 if (bret == true) {
6f34cc39 3953 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3954 goto go_again;
3955 }
3956 } else
3957 ret = 0;
3958
3959 return ret;
3960 } else {
3961 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3962 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3963 intel_dp->is_mst = false;
3964 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3965 /* send a hotplug event */
3966 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3967 }
3968 }
3969 return -EINVAL;
3970}
3971
a4fc5ed6
KP
3972/*
3973 * According to DP spec
3974 * 5.1.2:
3975 * 1. Read DPCD
3976 * 2. Configure link according to Receiver Capabilities
3977 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3978 * 4. Check link status on receipt of hot-plug interrupt
3979 */
a5146200 3980static void
ea5b213a 3981intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3982{
5b215bcf 3983 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3984 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3985 u8 sink_irq_vector;
93f62dad 3986 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3987
5b215bcf
DA
3988 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3989
da63a9f2 3990 if (!intel_encoder->connectors_active)
d2b996ac 3991 return;
59cd09e1 3992
da63a9f2 3993 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3994 return;
3995
1a125d8a
ID
3996 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3997 return;
3998
92fd8fd1 3999 /* Try to read receiver status if the link appears to be up */
93f62dad 4000 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4001 return;
4002 }
4003
92fd8fd1 4004 /* Now read the DPCD to see if it's actually running */
26d61aad 4005 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4006 return;
4007 }
4008
a60f0e38
JB
4009 /* Try to read the source of the interrupt */
4010 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4011 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4012 /* Clear interrupt source */
9d1a1031
JN
4013 drm_dp_dpcd_writeb(&intel_dp->aux,
4014 DP_DEVICE_SERVICE_IRQ_VECTOR,
4015 sink_irq_vector);
a60f0e38
JB
4016
4017 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4018 intel_dp_handle_test_request(intel_dp);
4019 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4020 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4021 }
4022
1ffdff13 4023 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4024 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4025 intel_encoder->base.name);
33a34e4e
JB
4026 intel_dp_start_link_train(intel_dp);
4027 intel_dp_complete_link_train(intel_dp);
3ab9c637 4028 intel_dp_stop_link_train(intel_dp);
33a34e4e 4029 }
a4fc5ed6 4030}
a4fc5ed6 4031
caf9ab24 4032/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4033static enum drm_connector_status
26d61aad 4034intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4035{
caf9ab24 4036 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4037 uint8_t type;
4038
4039 if (!intel_dp_get_dpcd(intel_dp))
4040 return connector_status_disconnected;
4041
4042 /* if there's no downstream port, we're done */
4043 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4044 return connector_status_connected;
caf9ab24
AJ
4045
4046 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4047 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4048 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4049 uint8_t reg;
9d1a1031
JN
4050
4051 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4052 &reg, 1) < 0)
caf9ab24 4053 return connector_status_unknown;
9d1a1031 4054
23235177
AJ
4055 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4056 : connector_status_disconnected;
caf9ab24
AJ
4057 }
4058
4059 /* If no HPD, poke DDC gently */
0b99836f 4060 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4061 return connector_status_connected;
caf9ab24
AJ
4062
4063 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4064 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4065 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4066 if (type == DP_DS_PORT_TYPE_VGA ||
4067 type == DP_DS_PORT_TYPE_NON_EDID)
4068 return connector_status_unknown;
4069 } else {
4070 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4071 DP_DWN_STRM_PORT_TYPE_MASK;
4072 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4073 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4074 return connector_status_unknown;
4075 }
caf9ab24
AJ
4076
4077 /* Anything else is out of spec, warn and ignore */
4078 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4079 return connector_status_disconnected;
71ba9000
AJ
4080}
4081
d410b56d
CW
4082static enum drm_connector_status
4083edp_detect(struct intel_dp *intel_dp)
4084{
4085 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4086 enum drm_connector_status status;
4087
4088 status = intel_panel_detect(dev);
4089 if (status == connector_status_unknown)
4090 status = connector_status_connected;
4091
4092 return status;
4093}
4094
5eb08b69 4095static enum drm_connector_status
a9756bb5 4096ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4097{
30add22d 4098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4099 struct drm_i915_private *dev_priv = dev->dev_private;
4100 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4101
1b469639
DL
4102 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4103 return connector_status_disconnected;
4104
26d61aad 4105 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4106}
4107
2a592bec
DA
4108static int g4x_digital_port_connected(struct drm_device *dev,
4109 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4110{
a4fc5ed6 4111 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4112 uint32_t bit;
5eb08b69 4113
232a6ee9
TP
4114 if (IS_VALLEYVIEW(dev)) {
4115 switch (intel_dig_port->port) {
4116 case PORT_B:
4117 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4118 break;
4119 case PORT_C:
4120 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4121 break;
4122 case PORT_D:
4123 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4124 break;
4125 default:
2a592bec 4126 return -EINVAL;
232a6ee9
TP
4127 }
4128 } else {
4129 switch (intel_dig_port->port) {
4130 case PORT_B:
4131 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4132 break;
4133 case PORT_C:
4134 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4135 break;
4136 case PORT_D:
4137 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4138 break;
4139 default:
2a592bec 4140 return -EINVAL;
232a6ee9 4141 }
a4fc5ed6
KP
4142 }
4143
10f76a38 4144 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4145 return 0;
4146 return 1;
4147}
4148
4149static enum drm_connector_status
4150g4x_dp_detect(struct intel_dp *intel_dp)
4151{
4152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4153 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4154 int ret;
4155
4156 /* Can't disconnect eDP, but you can close the lid... */
4157 if (is_edp(intel_dp)) {
4158 enum drm_connector_status status;
4159
4160 status = intel_panel_detect(dev);
4161 if (status == connector_status_unknown)
4162 status = connector_status_connected;
4163 return status;
4164 }
4165
4166 ret = g4x_digital_port_connected(dev, intel_dig_port);
4167 if (ret == -EINVAL)
4168 return connector_status_unknown;
4169 else if (ret == 0)
a4fc5ed6
KP
4170 return connector_status_disconnected;
4171
26d61aad 4172 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4173}
4174
8c241fef 4175static struct edid *
beb60608 4176intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4177{
beb60608 4178 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4179
9cd300e0
JN
4180 /* use cached edid if we have one */
4181 if (intel_connector->edid) {
9cd300e0
JN
4182 /* invalid edid */
4183 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4184 return NULL;
4185
55e9edeb 4186 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4187 } else
4188 return drm_get_edid(&intel_connector->base,
4189 &intel_dp->aux.ddc);
4190}
8c241fef 4191
beb60608
CW
4192static void
4193intel_dp_set_edid(struct intel_dp *intel_dp)
4194{
4195 struct intel_connector *intel_connector = intel_dp->attached_connector;
4196 struct edid *edid;
8c241fef 4197
beb60608
CW
4198 edid = intel_dp_get_edid(intel_dp);
4199 intel_connector->detect_edid = edid;
4200
4201 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4202 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4203 else
4204 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4205}
4206
beb60608
CW
4207static void
4208intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4209{
beb60608 4210 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4211
beb60608
CW
4212 kfree(intel_connector->detect_edid);
4213 intel_connector->detect_edid = NULL;
9cd300e0 4214
beb60608
CW
4215 intel_dp->has_audio = false;
4216}
d6f24d0f 4217
beb60608
CW
4218static enum intel_display_power_domain
4219intel_dp_power_get(struct intel_dp *dp)
4220{
4221 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4222 enum intel_display_power_domain power_domain;
4223
4224 power_domain = intel_display_port_power_domain(encoder);
4225 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4226
4227 return power_domain;
4228}
d6f24d0f 4229
beb60608
CW
4230static void
4231intel_dp_power_put(struct intel_dp *dp,
4232 enum intel_display_power_domain power_domain)
4233{
4234 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4235 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4236}
4237
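/*
 * Full connector detect: drop any cached EDID, run the platform specific
 * hotplug/DPCD probe, read the sink OUI, check for MST (in which case the
 * connector is reported disconnected) and finally cache fresh EDID and
 * audio capability.
 */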
a9756bb5
ZW
4238static enum drm_connector_status
4239intel_dp_detect(struct drm_connector *connector, bool force)
4240{
4241 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4242 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4243 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4244 struct drm_device *dev = connector->dev;
a9756bb5 4245 enum drm_connector_status status;
671dedd2 4246 enum intel_display_power_domain power_domain;
0e32b39c 4247 bool ret;
a9756bb5 4248
164c8598 4249 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4250 connector->base.id, connector->name);
beb60608 4251 intel_dp_unset_edid(intel_dp);
164c8598 4252
0e32b39c
DA
4253 if (intel_dp->is_mst) {
4254 /* MST devices are disconnected from a monitor POV */
4255 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4256 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4257 return connector_status_disconnected;
0e32b39c
DA
4258 }
4259
beb60608 4260 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4261
d410b56d
CW
4262 /* Can't disconnect eDP, but you can close the lid... */
4263 if (is_edp(intel_dp))
4264 status = edp_detect(intel_dp);
4265 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4266 status = ironlake_dp_detect(intel_dp);
4267 else
4268 status = g4x_dp_detect(intel_dp);
4269 if (status != connector_status_connected)
c8c8fb33 4270 goto out;
a9756bb5 4271
0d198328
AJ
4272 intel_dp_probe_oui(intel_dp);
4273
0e32b39c
DA
4274 ret = intel_dp_probe_mst(intel_dp);
4275 if (ret) {
4276 /* if we are in MST mode then this connector
 4277 * won't appear connected or have anything with EDID on it */
4278 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4279 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4280 status = connector_status_disconnected;
4281 goto out;
4282 }
4283
beb60608 4284 intel_dp_set_edid(intel_dp);
a9756bb5 4285
d63885da
PZ
4286 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4287 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4288 status = connector_status_connected;
4289
4290out:
beb60608 4291 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4292 return status;
a4fc5ed6
KP
4293}
4294
beb60608
CW
4295static void
4296intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4297{
df0e9248 4298 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4299 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4300 enum intel_display_power_domain power_domain;
a4fc5ed6 4301
beb60608
CW
4302 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4303 connector->base.id, connector->name);
4304 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4305
beb60608
CW
4306 if (connector->status != connector_status_connected)
4307 return;
671dedd2 4308
beb60608
CW
4309 power_domain = intel_dp_power_get(intel_dp);
4310
4311 intel_dp_set_edid(intel_dp);
4312
4313 intel_dp_power_put(intel_dp, power_domain);
4314
4315 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4316 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4317}
4318
4319static int intel_dp_get_modes(struct drm_connector *connector)
4320{
4321 struct intel_connector *intel_connector = to_intel_connector(connector);
4322 struct edid *edid;
4323
4324 edid = intel_connector->detect_edid;
4325 if (edid) {
4326 int ret = intel_connector_update_modes(connector, edid);
4327 if (ret)
4328 return ret;
4329 }
32f9d658 4330
f8779fda 4331 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4332 if (is_edp(intel_attached_dp(connector)) &&
4333 intel_connector->panel.fixed_mode) {
f8779fda 4334 struct drm_display_mode *mode;
beb60608
CW
4335
4336 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4337 intel_connector->panel.fixed_mode);
f8779fda 4338 if (mode) {
32f9d658
ZW
4339 drm_mode_probed_add(connector, mode);
4340 return 1;
4341 }
4342 }
beb60608 4343
32f9d658 4344 return 0;
a4fc5ed6
KP
4345}
4346
1aad7ac0
CW
4347static bool
4348intel_dp_detect_audio(struct drm_connector *connector)
4349{
1aad7ac0 4350 bool has_audio = false;
beb60608 4351 struct edid *edid;
1aad7ac0 4352
beb60608
CW
4353 edid = to_intel_connector(connector)->detect_edid;
4354 if (edid)
1aad7ac0 4355 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4356
1aad7ac0
CW
4357 return has_audio;
4358}
4359
f684960e
CW
4360static int
4361intel_dp_set_property(struct drm_connector *connector,
4362 struct drm_property *property,
4363 uint64_t val)
4364{
e953fd7b 4365 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4366 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4367 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4368 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4369 int ret;
4370
662595df 4371 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4372 if (ret)
4373 return ret;
4374
3f43c48d 4375 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4376 int i = val;
4377 bool has_audio;
4378
4379 if (i == intel_dp->force_audio)
f684960e
CW
4380 return 0;
4381
1aad7ac0 4382 intel_dp->force_audio = i;
f684960e 4383
c3e5f67b 4384 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4385 has_audio = intel_dp_detect_audio(connector);
4386 else
c3e5f67b 4387 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4388
4389 if (has_audio == intel_dp->has_audio)
f684960e
CW
4390 return 0;
4391
1aad7ac0 4392 intel_dp->has_audio = has_audio;
f684960e
CW
4393 goto done;
4394 }
4395
e953fd7b 4396 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4397 bool old_auto = intel_dp->color_range_auto;
4398 uint32_t old_range = intel_dp->color_range;
4399
55bc60db
VS
4400 switch (val) {
4401 case INTEL_BROADCAST_RGB_AUTO:
4402 intel_dp->color_range_auto = true;
4403 break;
4404 case INTEL_BROADCAST_RGB_FULL:
4405 intel_dp->color_range_auto = false;
4406 intel_dp->color_range = 0;
4407 break;
4408 case INTEL_BROADCAST_RGB_LIMITED:
4409 intel_dp->color_range_auto = false;
4410 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4411 break;
4412 default:
4413 return -EINVAL;
4414 }
ae4edb80
DV
4415
4416 if (old_auto == intel_dp->color_range_auto &&
4417 old_range == intel_dp->color_range)
4418 return 0;
4419
e953fd7b
CW
4420 goto done;
4421 }
4422
53b41837
YN
4423 if (is_edp(intel_dp) &&
4424 property == connector->dev->mode_config.scaling_mode_property) {
4425 if (val == DRM_MODE_SCALE_NONE) {
 4426 DRM_DEBUG_KMS("no scaling (DRM_MODE_SCALE_NONE) not supported\n");
4427 return -EINVAL;
4428 }
4429
4430 if (intel_connector->panel.fitting_mode == val) {
4431 /* the eDP scaling property is not changed */
4432 return 0;
4433 }
4434 intel_connector->panel.fitting_mode = val;
4435
4436 goto done;
4437 }
4438
f684960e
CW
4439 return -EINVAL;
4440
4441done:
c0c36b94
CW
4442 if (intel_encoder->base.crtc)
4443 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4444
4445 return 0;
4446}
4447
a4fc5ed6 4448static void
73845adf 4449intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4450{
1d508706 4451 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4452
10e972d3 4453 kfree(intel_connector->detect_edid);
beb60608 4454
9cd300e0
JN
4455 if (!IS_ERR_OR_NULL(intel_connector->edid))
4456 kfree(intel_connector->edid);
4457
acd8db10
PZ
4458 /* Can't call is_edp() since the encoder may have been destroyed
4459 * already. */
4460 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4461 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4462
a4fc5ed6 4463 drm_connector_cleanup(connector);
55f78c43 4464 kfree(connector);
a4fc5ed6
KP
4465}
4466
00c09d70 4467void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4468{
da63a9f2
PZ
4469 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4470 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4471
4f71d0cb 4472 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4473 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4474 if (is_edp(intel_dp)) {
4475 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4476 /*
 4477 * vdd might still be enabled due to the delayed vdd off.
4478 * Make sure vdd is actually turned off here.
4479 */
773538e8 4480 pps_lock(intel_dp);
4be73780 4481 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4482 pps_unlock(intel_dp);
4483
01527b31
CT
4484 if (intel_dp->edp_notifier.notifier_call) {
4485 unregister_reboot_notifier(&intel_dp->edp_notifier);
4486 intel_dp->edp_notifier.notifier_call = NULL;
4487 }
bd943159 4488 }
c8bd0e49 4489 drm_encoder_cleanup(encoder);
da63a9f2 4490 kfree(intel_dig_port);
24d05927
DV
4491}
4492
07f9cd0b
ID
4493static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4494{
4495 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4496
4497 if (!is_edp(intel_dp))
4498 return;
4499
951468f3
VS
4500 /*
 4501 * vdd might still be enabled due to the delayed vdd off.
4502 * Make sure vdd is actually turned off here.
4503 */
afa4e53a 4504 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4505 pps_lock(intel_dp);
07f9cd0b 4506 edp_panel_vdd_off_sync(intel_dp);
773538e8 4507 pps_unlock(intel_dp);
07f9cd0b
ID
4508}
4509
49e6bc51
VS
4510static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4511{
4512 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4513 struct drm_device *dev = intel_dig_port->base.base.dev;
4514 struct drm_i915_private *dev_priv = dev->dev_private;
4515 enum intel_display_power_domain power_domain;
4516
4517 lockdep_assert_held(&dev_priv->pps_mutex);
4518
4519 if (!edp_have_panel_vdd(intel_dp))
4520 return;
4521
4522 /*
4523 * The VDD bit needs a power domain reference, so if the bit is
4524 * already enabled when we boot or resume, grab this reference and
4525 * schedule a vdd off, so we don't hold on to the reference
4526 * indefinitely.
4527 */
4528 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4529 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4530 intel_display_power_get(dev_priv, power_domain);
4531
4532 edp_panel_vdd_schedule_off(intel_dp);
4533}
4534
6d93c0c4
ID
4535static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4536{
49e6bc51
VS
4537 struct intel_dp *intel_dp;
4538
4539 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4540 return;
4541
4542 intel_dp = enc_to_intel_dp(encoder);
4543
4544 pps_lock(intel_dp);
4545
4546 /*
4547 * Read out the current power sequencer assignment,
4548 * in case the BIOS did something with it.
4549 */
4550 if (IS_VALLEYVIEW(encoder->dev))
4551 vlv_initial_power_sequencer_setup(intel_dp);
4552
4553 intel_edp_panel_vdd_sanitize(intel_dp);
4554
4555 pps_unlock(intel_dp);
6d93c0c4
ID
4556}
4557
a4fc5ed6 4558static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4559 .dpms = intel_connector_dpms,
a4fc5ed6 4560 .detect = intel_dp_detect,
beb60608 4561 .force = intel_dp_force,
a4fc5ed6 4562 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4563 .set_property = intel_dp_set_property,
2545e4a6 4564 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4565 .destroy = intel_dp_connector_destroy,
c6f95f27 4566 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4567};
4568
4569static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4570 .get_modes = intel_dp_get_modes,
4571 .mode_valid = intel_dp_mode_valid,
df0e9248 4572 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4573};
4574
a4fc5ed6 4575static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4576 .reset = intel_dp_encoder_reset,
24d05927 4577 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4578};
4579
0e32b39c 4580void
21d40d37 4581intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4582{
0e32b39c 4583 return;
c8110e52 4584}
6207937d 4585
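/*
 * Hotplug IRQ handler. A long pulse re-probes the sink (DPCD, OUI, MST),
 * while a short pulse services MST interrupts or re-checks link status on
 * an SST link. Long pulses on eDP are ignored to avoid a vdd on/off loop.
 */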
b2c5c181 4586enum irqreturn
13cf5504
DA
4587intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4588{
4589 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4590 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4591 struct drm_device *dev = intel_dig_port->base.base.dev;
4592 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4593 enum intel_display_power_domain power_domain;
b2c5c181 4594 enum irqreturn ret = IRQ_NONE;
1c767b33 4595
0e32b39c
DA
4596 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4597 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4598
7a7f84cc
VS
4599 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4600 /*
4601 * vdd off can generate a long pulse on eDP which
4602 * would require vdd on to handle it, and thus we
4603 * would end up in an endless cycle of
4604 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4605 */
4606 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4607 port_name(intel_dig_port->port));
a8b3d52f 4608 return IRQ_HANDLED;
7a7f84cc
VS
4609 }
4610
26fbb774
VS
4611 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4612 port_name(intel_dig_port->port),
0e32b39c 4613 long_hpd ? "long" : "short");
13cf5504 4614
1c767b33
ID
4615 power_domain = intel_display_port_power_domain(intel_encoder);
4616 intel_display_power_get(dev_priv, power_domain);
4617
0e32b39c 4618 if (long_hpd) {
2a592bec
DA
4619
4620 if (HAS_PCH_SPLIT(dev)) {
4621 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4622 goto mst_fail;
4623 } else {
4624 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4625 goto mst_fail;
4626 }
0e32b39c
DA
4627
4628 if (!intel_dp_get_dpcd(intel_dp)) {
4629 goto mst_fail;
4630 }
4631
4632 intel_dp_probe_oui(intel_dp);
4633
4634 if (!intel_dp_probe_mst(intel_dp))
4635 goto mst_fail;
4636
4637 } else {
4638 if (intel_dp->is_mst) {
1c767b33 4639 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4640 goto mst_fail;
4641 }
4642
4643 if (!intel_dp->is_mst) {
4644 /*
4645 * we'll check the link status via the normal hot plug path later -
4646 * but for short hpds we should check it now
4647 */
5b215bcf 4648 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4649 intel_dp_check_link_status(intel_dp);
5b215bcf 4650 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4651 }
4652 }
b2c5c181
DV
4653
4654 ret = IRQ_HANDLED;
4655
1c767b33 4656 goto put_power;
0e32b39c
DA
4657mst_fail:
 4658 /* if we were in MST mode and the device is no longer there, get out of MST mode */
4659 if (intel_dp->is_mst) {
4660 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4661 intel_dp->is_mst = false;
4662 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4663 }
1c767b33
ID
4664put_power:
4665 intel_display_power_put(dev_priv, power_domain);
4666
4667 return ret;
13cf5504
DA
4668}
4669
e3421a18
ZW
4670/* Return which DP Port should be selected for Transcoder DP control */
4671int
0206e353 4672intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4673{
4674 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4675 struct intel_encoder *intel_encoder;
4676 struct intel_dp *intel_dp;
e3421a18 4677
fa90ecef
PZ
4678 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4679 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4680
fa90ecef
PZ
4681 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4682 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4683 return intel_dp->output_reg;
e3421a18 4684 }
ea5b213a 4685
e3421a18
ZW
4686 return -1;
4687}
4688
36e83a18 4689/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4690bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4691{
4692 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4693 union child_device_config *p_child;
36e83a18 4694 int i;
5d8a7752
VS
4695 static const short port_mapping[] = {
4696 [PORT_B] = PORT_IDPB,
4697 [PORT_C] = PORT_IDPC,
4698 [PORT_D] = PORT_IDPD,
4699 };
36e83a18 4700
3b32a35b
VS
4701 if (port == PORT_A)
4702 return true;
4703
41aa3448 4704 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4705 return false;
4706
41aa3448
RV
4707 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4708 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4709
5d8a7752 4710 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4711 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4712 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4713 return true;
4714 }
4715 return false;
4716}
4717
0e32b39c 4718void
f684960e
CW
4719intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4720{
53b41837
YN
4721 struct intel_connector *intel_connector = to_intel_connector(connector);
4722
3f43c48d 4723 intel_attach_force_audio_property(connector);
e953fd7b 4724 intel_attach_broadcast_rgb_property(connector);
55bc60db 4725 intel_dp->color_range_auto = true;
53b41837
YN
4726
4727 if (is_edp(intel_dp)) {
4728 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4729 drm_object_attach_property(
4730 &connector->base,
53b41837 4731 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4732 DRM_MODE_SCALE_ASPECT);
4733 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4734 }
f684960e
CW
4735}
4736
dada1a9f
ID
4737static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4738{
4739 intel_dp->last_power_cycle = jiffies;
4740 intel_dp->last_power_on = jiffies;
4741 intel_dp->last_backlight_off = jiffies;
4742}
4743
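/*
 * Compute the final panel power sequencer delays: take the maximum of the
 * values currently programmed in the PPS registers and the VBT, falling
 * back to the eDP spec limits when both are unset.
 */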
67a54566
DV
4744static void
4745intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4746 struct intel_dp *intel_dp)
67a54566
DV
4747{
4748 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4749 struct edp_power_seq cur, vbt, spec,
4750 *final = &intel_dp->pps_delays;
67a54566 4751 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4752 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4753
e39b999a
VS
4754 lockdep_assert_held(&dev_priv->pps_mutex);
4755
81ddbc69
VS
4756 /* already initialized? */
4757 if (final->t11_t12 != 0)
4758 return;
4759
453c5420 4760 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4761 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4762 pp_on_reg = PCH_PP_ON_DELAYS;
4763 pp_off_reg = PCH_PP_OFF_DELAYS;
4764 pp_div_reg = PCH_PP_DIVISOR;
4765 } else {
bf13e81b
JN
4766 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4767
4768 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4769 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4770 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4771 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4772 }
67a54566
DV
4773
4774 /* Workaround: Need to write PP_CONTROL with the unlock key as
4775 * the very first thing. */
453c5420 4776 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4777 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4778
453c5420
JB
4779 pp_on = I915_READ(pp_on_reg);
4780 pp_off = I915_READ(pp_off_reg);
4781 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4782
4783 /* Pull timing values out of registers */
4784 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4785 PANEL_POWER_UP_DELAY_SHIFT;
4786
4787 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4788 PANEL_LIGHT_ON_DELAY_SHIFT;
4789
4790 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4791 PANEL_LIGHT_OFF_DELAY_SHIFT;
4792
4793 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4794 PANEL_POWER_DOWN_DELAY_SHIFT;
4795
4796 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4797 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4798
4799 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4800 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4801
41aa3448 4802 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4803
4804 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4805 * our hw here, which are all in 100usec. */
4806 spec.t1_t3 = 210 * 10;
4807 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4808 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4809 spec.t10 = 500 * 10;
 4810	/* This one is special and actually in units of 100ms, but zero
 4811	 * based in the hw (so we need to add 100 ms). But the sw vbt
 4812	 * table multiplies it by 1000 to make it in units of 100usec,
 4813	 * too. */
4814 spec.t11_t12 = (510 + 100) * 10;
4815
4816 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4817 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4818
4819 /* Use the max of the register settings and vbt. If both are
4820 * unset, fall back to the spec limits. */
36b5f425 4821#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4822 spec.field : \
4823 max(cur.field, vbt.field))
4824 assign_final(t1_t3);
4825 assign_final(t8);
4826 assign_final(t9);
4827 assign_final(t10);
4828 assign_final(t11_t12);
4829#undef assign_final
4830
36b5f425 4831#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4832 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4833 intel_dp->backlight_on_delay = get_delay(t8);
4834 intel_dp->backlight_off_delay = get_delay(t9);
4835 intel_dp->panel_power_down_delay = get_delay(t10);
4836 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4837#undef get_delay
4838
f30d26e4
JN
4839 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4840 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4841 intel_dp->panel_power_cycle_delay);
4842
4843 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4844 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4845}
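
/*
 * Editorial note (not part of the blamed file): a worked example of the
 * merge above, with made-up numbers. If the registers report t1_t3 == 0
 * (unprogrammed) and the VBT reports t1_t3 == 1000 (100 ms in the
 * hardware's 100 us units), assign_final() keeps max(cur, vbt) == 1000;
 * only when both sources are zero does it fall back to the eDP 1.3 spec
 * limit. get_delay() then converts to milliseconds via
 * DIV_ROUND_UP(1000, 10), so intel_dp->panel_power_up_delay ends up as
 * 100 ms.
 */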
4846
4847static void
4848intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4849 struct intel_dp *intel_dp)
f30d26e4
JN
4850{
4851 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4852 u32 pp_on, pp_off, pp_div, port_sel = 0;
4853 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4854 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4855 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4856 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4857
e39b999a 4858 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4859
4860 if (HAS_PCH_SPLIT(dev)) {
4861 pp_on_reg = PCH_PP_ON_DELAYS;
4862 pp_off_reg = PCH_PP_OFF_DELAYS;
4863 pp_div_reg = PCH_PP_DIVISOR;
4864 } else {
bf13e81b
JN
4865 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4866
4867 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4868 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4869 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4870 }
4871
b2f19d1a
PZ
4872 /*
4873 * And finally store the new values in the power sequencer. The
4874 * backlight delays are set to 1 because we do manual waits on them. For
4875 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4876 * we'll end up waiting for the backlight off delay twice: once when we
4877 * do the manual sleep, and once when we disable the panel and wait for
4878 * the PP_STATUS bit to become zero.
4879 */
f30d26e4 4880 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4881 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4882 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4883 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4884 /* Compute the divisor for the pp clock, simply match the Bspec
4885 * formula. */
453c5420 4886 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4887 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4888 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4889
4890 /* Haswell doesn't have any port selection bits for the panel
4891 * power sequencer any more. */
bc7d38a4 4892 if (IS_VALLEYVIEW(dev)) {
ad933b56 4893 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4894 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4895 if (port == PORT_A)
a24c144c 4896 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4897 else
a24c144c 4898 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4899 }
4900
453c5420
JB
4901 pp_on |= port_sel;
4902
4903 I915_WRITE(pp_on_reg, pp_on);
4904 I915_WRITE(pp_off_reg, pp_off);
4905 I915_WRITE(pp_div_reg, pp_div);
67a54566 4906
67a54566 4907 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4908 I915_READ(pp_on_reg),
4909 I915_READ(pp_off_reg),
4910 I915_READ(pp_div_reg));
f684960e
CW
4911}
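
/*
 * Editorial note (not part of the blamed file): a worked example of the
 * delay packing above, with made-up numbers. The bookkeeping value
 * seq->t11_t12 is kept in 100 us units, while the power-cycle field in
 * PP_DIVISOR takes steps of 100 ms, hence DIV_ROUND_UP(seq->t11_t12, 1000):
 * a t11_t12 of 6100 (610 ms) is written as 7, i.e. rounded up to the next
 * 100 ms step. The reference divider in the same register is derived from
 * the raw clock (div) using the Bspec formula quoted in the code.
 */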
4912
b33a2815
VK
4913/**
4914 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4915 * @dev: DRM device
4916 * @refresh_rate: RR to be programmed
4917 *
4918 * This function gets called when refresh rate (RR) has to be changed from
4919 * one frequency to another. Switches can be between high and low RR
4920 * supported by the panel or to any other RR based on media playback (in
4921 * this case, RR value needs to be passed from user space).
4922 *
4923 * The caller of this function needs to take a lock on dev_priv->drrs.
4924 */
96178eeb 4925static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4926{
4927 struct drm_i915_private *dev_priv = dev->dev_private;
4928 struct intel_encoder *encoder;
96178eeb
VK
4929 struct intel_digital_port *dig_port = NULL;
4930 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4931 struct intel_crtc_state *config = NULL;
439d7ac0 4932 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4933 u32 reg, val;
96178eeb 4934 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4935
4936 if (refresh_rate <= 0) {
4937 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4938 return;
4939 }
4940
96178eeb
VK
4941 if (intel_dp == NULL) {
4942 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4943 return;
4944 }
4945
1fcc9d1c 4946 /*
e4d59f6b
RV
4947 * FIXME: This needs proper synchronization with psr state for some
4948 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4949 */
439d7ac0 4950
96178eeb
VK
4951 dig_port = dp_to_dig_port(intel_dp);
4952 encoder = &dig_port->base;
439d7ac0
PB
4953 intel_crtc = encoder->new_crtc;
4954
4955 if (!intel_crtc) {
4956 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4957 return;
4958 }
4959
6e3c9717 4960 config = intel_crtc->config;
439d7ac0 4961
96178eeb 4962 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4963 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4964 return;
4965 }
4966
96178eeb
VK
4967 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4968 refresh_rate)
439d7ac0
PB
4969 index = DRRS_LOW_RR;
4970
96178eeb 4971 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4972 DRM_DEBUG_KMS(
4973 "DRRS requested for previously set RR...ignoring\n");
4974 return;
4975 }
4976
4977 if (!intel_crtc->active) {
4978 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4979 return;
4980 }
4981
44395bfe 4982 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4983 switch (index) {
4984 case DRRS_HIGH_RR:
4985 intel_dp_set_m_n(intel_crtc, M1_N1);
4986 break;
4987 case DRRS_LOW_RR:
4988 intel_dp_set_m_n(intel_crtc, M2_N2);
4989 break;
4990 case DRRS_MAX_RR:
4991 default:
 4992			DRM_ERROR("Unsupported refresh rate type\n");
4993 }
4994 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4995 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4996 val = I915_READ(reg);
a4c30b1d 4997
439d7ac0 4998 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4999 if (IS_VALLEYVIEW(dev))
5000 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5001 else
5002 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5003 } else {
6fa7aec1
VK
5004 if (IS_VALLEYVIEW(dev))
5005 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5006 else
5007 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5008 }
5009 I915_WRITE(reg, val);
5010 }
5011
4e9ac947
VK
5012 dev_priv->drrs.refresh_rate_type = index;
5013
5014 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5015}
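
/*
 * Editorial sketch (not part of the blamed file): the locking contract the
 * kernel-doc above describes, modelled on intel_edp_drrs_downclock_work()
 * below. The wrapper name is hypothetical; the calls and fields are the
 * ones used elsewhere in this file.
 */
static void example_drrs_switch_to_low_rr(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);
	intel_dp = dev_priv->drrs.dp;

	/* Only downclock while DRRS is active and the screen is idle. */
	if (intel_dp && !dev_priv->drrs.busy_frontbuffer_bits)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}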
5016
b33a2815
VK
5017/**
5018 * intel_edp_drrs_enable - init drrs struct if supported
5019 * @intel_dp: DP struct
5020 *
5021 * Initializes frontbuffer_bits and drrs.dp
5022 */
c395578e
VK
5023void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5024{
5025 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5026 struct drm_i915_private *dev_priv = dev->dev_private;
5027 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5028 struct drm_crtc *crtc = dig_port->base.base.crtc;
5029 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5030
5031 if (!intel_crtc->config->has_drrs) {
5032 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5033 return;
5034 }
5035
5036 mutex_lock(&dev_priv->drrs.mutex);
5037 if (WARN_ON(dev_priv->drrs.dp)) {
5038 DRM_ERROR("DRRS already enabled\n");
5039 goto unlock;
5040 }
5041
5042 dev_priv->drrs.busy_frontbuffer_bits = 0;
5043
5044 dev_priv->drrs.dp = intel_dp;
5045
5046unlock:
5047 mutex_unlock(&dev_priv->drrs.mutex);
5048}
5049
b33a2815
VK
5050/**
5051 * intel_edp_drrs_disable - Disable DRRS
5052 * @intel_dp: DP struct
5053 *
5054 */
c395578e
VK
5055void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5056{
5057 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5058 struct drm_i915_private *dev_priv = dev->dev_private;
5059 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5060 struct drm_crtc *crtc = dig_port->base.base.crtc;
5061 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5062
5063 if (!intel_crtc->config->has_drrs)
5064 return;
5065
5066 mutex_lock(&dev_priv->drrs.mutex);
5067 if (!dev_priv->drrs.dp) {
5068 mutex_unlock(&dev_priv->drrs.mutex);
5069 return;
5070 }
5071
5072 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5073 intel_dp_set_drrs_state(dev_priv->dev,
5074 intel_dp->attached_connector->panel.
5075 fixed_mode->vrefresh);
5076
5077 dev_priv->drrs.dp = NULL;
5078 mutex_unlock(&dev_priv->drrs.mutex);
5079
5080 cancel_delayed_work_sync(&dev_priv->drrs.work);
5081}
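
/*
 * Editorial sketch (not part of the blamed file): the two helpers above are
 * a pair, presumably driven from the eDP encoder enable/disable paths. A
 * hypothetical caller would look roughly like this.
 */
static void example_edp_drrs_update(struct intel_dp *intel_dp, bool enable)
{
	if (enable)
		intel_edp_drrs_enable(intel_dp);	/* records drrs.dp */
	else
		intel_edp_drrs_disable(intel_dp);	/* restores the fixed-mode RR */
}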
5082
4e9ac947
VK
5083static void intel_edp_drrs_downclock_work(struct work_struct *work)
5084{
5085 struct drm_i915_private *dev_priv =
5086 container_of(work, typeof(*dev_priv), drrs.work.work);
5087 struct intel_dp *intel_dp;
5088
5089 mutex_lock(&dev_priv->drrs.mutex);
5090
5091 intel_dp = dev_priv->drrs.dp;
5092
5093 if (!intel_dp)
5094 goto unlock;
5095
439d7ac0 5096 /*
4e9ac947
VK
5097 * The delayed work can race with an invalidate hence we need to
5098 * recheck.
439d7ac0
PB
5099 */
5100
4e9ac947
VK
5101 if (dev_priv->drrs.busy_frontbuffer_bits)
5102 goto unlock;
439d7ac0 5103
4e9ac947
VK
5104 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5105 intel_dp_set_drrs_state(dev_priv->dev,
5106 intel_dp->attached_connector->panel.
5107 downclock_mode->vrefresh);
439d7ac0 5108
4e9ac947 5109unlock:
439d7ac0 5110
4e9ac947 5111 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5112}
5113
b33a2815
VK
5114/**
5115 * intel_edp_drrs_invalidate - Invalidate DRRS
5116 * @dev: DRM device
5117 * @frontbuffer_bits: frontbuffer plane tracking bits
5118 *
 5119 * When there is a disturbance on screen (due to cursor movement/time
 5120 * update etc.), DRRS needs to be invalidated, i.e. a switch to the
 5121 * high RR is needed.
5122 *
5123 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5124 */
a93fad0f
VK
5125void intel_edp_drrs_invalidate(struct drm_device *dev,
5126 unsigned frontbuffer_bits)
5127{
5128 struct drm_i915_private *dev_priv = dev->dev_private;
5129 struct drm_crtc *crtc;
5130 enum pipe pipe;
5131
5132 if (!dev_priv->drrs.dp)
5133 return;
5134
3954e733
R
5135 cancel_delayed_work_sync(&dev_priv->drrs.work);
5136
a93fad0f
VK
5137 mutex_lock(&dev_priv->drrs.mutex);
5138 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5139 pipe = to_intel_crtc(crtc)->pipe;
5140
5141 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5142 intel_dp_set_drrs_state(dev_priv->dev,
5143 dev_priv->drrs.dp->attached_connector->panel.
5144 fixed_mode->vrefresh);
5145 }
5146
5147 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5148
5149 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5150 mutex_unlock(&dev_priv->drrs.mutex);
5151}
5152
b33a2815
VK
5153/**
5154 * intel_edp_drrs_flush - Flush DRRS
5155 * @dev: DRM device
5156 * @frontbuffer_bits: frontbuffer plane tracking bits
5157 *
5158 * When there is no movement on screen, DRRS work can be scheduled.
5159 * This DRRS work is responsible for setting relevant registers after a
5160 * timeout of 1 second.
5161 *
5162 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5163 */
a93fad0f
VK
5164void intel_edp_drrs_flush(struct drm_device *dev,
5165 unsigned frontbuffer_bits)
5166{
5167 struct drm_i915_private *dev_priv = dev->dev_private;
5168 struct drm_crtc *crtc;
5169 enum pipe pipe;
5170
5171 if (!dev_priv->drrs.dp)
5172 return;
5173
3954e733
R
5174 cancel_delayed_work_sync(&dev_priv->drrs.work);
5175
a93fad0f
VK
5176 mutex_lock(&dev_priv->drrs.mutex);
5177 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5178 pipe = to_intel_crtc(crtc)->pipe;
5179 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5180
a93fad0f
VK
5181 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5182 !dev_priv->drrs.busy_frontbuffer_bits)
5183 schedule_delayed_work(&dev_priv->drrs.work,
5184 msecs_to_jiffies(1000));
5185 mutex_unlock(&dev_priv->drrs.mutex);
5186}
5187
b33a2815
VK
5188/**
5189 * DOC: Display Refresh Rate Switching (DRRS)
5190 *
5191 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5192 * which enables switching between low and high refresh rates,
 5193 * dynamically, based on the usage scenario. This feature is applicable
 5194 * to internal panels.
5195 *
5196 * Indication that the panel supports DRRS is given by the panel EDID, which
5197 * would list multiple refresh rates for one resolution.
5198 *
5199 * DRRS is of 2 types - static and seamless.
5200 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5201 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5202 * Seamless DRRS involves changing RR without any visual effect to the user
5203 * and can be used during normal system usage. This is done by programming
5204 * certain registers.
5205 *
5206 * Support for static/seamless DRRS may be indicated in the VBT based on
5207 * inputs from the panel spec.
5208 *
5209 * DRRS saves power by switching to low RR based on usage scenarios.
5210 *
5211 * eDP DRRS:-
 5212 * The implementation is based on the frontbuffer tracking infrastructure.
5213 * When there is a disturbance on the screen triggered by user activity or a
5214 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5215 * When there is no movement on screen, after a timeout of 1 second, a switch
5216 * to low RR is made.
5217 * For integration with frontbuffer tracking code,
5218 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5219 *
5220 * DRRS can be further extended to support other internal panels and also
5221 * the scenario of video playback wherein RR is set based on the rate
5222 * requested by userspace.
5223 */
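
/*
 * Editorial sketch (not part of the blamed file): how frontbuffer tracking
 * code, as described above, is expected to drive DRRS. The function name
 * and the flip_done parameter are hypothetical; only the two exported
 * helpers come from this file.
 */
static void example_frontbuffer_dirty(struct drm_device *dev,
				      unsigned frontbuffer_bits,
				      bool flip_done)
{
	/* Screen is about to change: force the high refresh rate. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/*
	 * Once the update has hit the screen, clear the busy bits; the
	 * delayed work then drops to the low RR after ~1 second of idleness.
	 */
	if (flip_done)
		intel_edp_drrs_flush(dev, frontbuffer_bits);
}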
5224
5225/**
5226 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5227 * @intel_connector: eDP connector
5228 * @fixed_mode: preferred mode of panel
5229 *
5230 * This function is called only once at driver load to initialize basic
5231 * DRRS stuff.
5232 *
5233 * Returns:
5234 * Downclock mode if panel supports it, else return NULL.
5235 * DRRS support is determined by the presence of downclock mode (apart
5236 * from VBT setting).
5237 */
4f9db5b5 5238static struct drm_display_mode *
96178eeb
VK
5239intel_dp_drrs_init(struct intel_connector *intel_connector,
5240 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5241{
5242 struct drm_connector *connector = &intel_connector->base;
96178eeb 5243 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5244 struct drm_i915_private *dev_priv = dev->dev_private;
5245 struct drm_display_mode *downclock_mode = NULL;
5246
5247 if (INTEL_INFO(dev)->gen <= 6) {
5248 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5249 return NULL;
5250 }
5251
5252 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5253 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5254 return NULL;
5255 }
5256
5257 downclock_mode = intel_find_panel_downclock
5258 (dev, fixed_mode, connector);
5259
5260 if (!downclock_mode) {
a1d26342 5261 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5262 return NULL;
5263 }
5264
4e9ac947
VK
5265 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5266
96178eeb 5267 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5268
96178eeb 5269 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5270
96178eeb 5271 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5272 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5273 return downclock_mode;
5274}
5275
ed92f0b2 5276static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5277 struct intel_connector *intel_connector)
ed92f0b2
PZ
5278{
5279 struct drm_connector *connector = &intel_connector->base;
5280 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5281 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5282 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5283 struct drm_i915_private *dev_priv = dev->dev_private;
5284 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5285 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5286 bool has_dpcd;
5287 struct drm_display_mode *scan;
5288 struct edid *edid;
6517d273 5289 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5290
96178eeb 5291 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5292
ed92f0b2
PZ
5293 if (!is_edp(intel_dp))
5294 return true;
5295
49e6bc51
VS
5296 pps_lock(intel_dp);
5297 intel_edp_panel_vdd_sanitize(intel_dp);
5298 pps_unlock(intel_dp);
63635217 5299
ed92f0b2 5300 /* Cache DPCD and EDID for edp. */
ed92f0b2 5301 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5302
5303 if (has_dpcd) {
5304 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5305 dev_priv->no_aux_handshake =
5306 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5307 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5308 } else {
5309 /* if this fails, presume the device is a ghost */
5310 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5311 return false;
5312 }
5313
5314 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5315 pps_lock(intel_dp);
36b5f425 5316 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5317 pps_unlock(intel_dp);
ed92f0b2 5318
060c8778 5319 mutex_lock(&dev->mode_config.mutex);
0b99836f 5320 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5321 if (edid) {
5322 if (drm_add_edid_modes(connector, edid)) {
5323 drm_mode_connector_update_edid_property(connector,
5324 edid);
5325 drm_edid_to_eld(connector, edid);
5326 } else {
5327 kfree(edid);
5328 edid = ERR_PTR(-EINVAL);
5329 }
5330 } else {
5331 edid = ERR_PTR(-ENOENT);
5332 }
5333 intel_connector->edid = edid;
5334
5335 /* prefer fixed mode from EDID if available */
5336 list_for_each_entry(scan, &connector->probed_modes, head) {
5337 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5338 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5339 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5340 intel_connector, fixed_mode);
ed92f0b2
PZ
5341 break;
5342 }
5343 }
5344
5345 /* fallback to VBT if available for eDP */
5346 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5347 fixed_mode = drm_mode_duplicate(dev,
5348 dev_priv->vbt.lfp_lvds_vbt_mode);
5349 if (fixed_mode)
5350 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5351 }
060c8778 5352 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5353
01527b31
CT
5354 if (IS_VALLEYVIEW(dev)) {
5355 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5356 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5357
5358 /*
5359 * Figure out the current pipe for the initial backlight setup.
5360 * If the current pipe isn't valid, try the PPS pipe, and if that
5361 * fails just assume pipe A.
5362 */
5363 if (IS_CHERRYVIEW(dev))
5364 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5365 else
5366 pipe = PORT_TO_PIPE(intel_dp->DP);
5367
5368 if (pipe != PIPE_A && pipe != PIPE_B)
5369 pipe = intel_dp->pps_pipe;
5370
5371 if (pipe != PIPE_A && pipe != PIPE_B)
5372 pipe = PIPE_A;
5373
5374 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5375 pipe_name(pipe));
01527b31
CT
5376 }
5377
4f9db5b5 5378 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5379 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5380 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5381
5382 return true;
5383}
5384
16c25533 5385bool
f0fec3f2
PZ
5386intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5387 struct intel_connector *intel_connector)
a4fc5ed6 5388{
f0fec3f2
PZ
5389 struct drm_connector *connector = &intel_connector->base;
5390 struct intel_dp *intel_dp = &intel_dig_port->dp;
5391 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5392 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5393 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5394 enum port port = intel_dig_port->port;
0b99836f 5395 int type;
a4fc5ed6 5396
a4a5d2f8
VS
5397 intel_dp->pps_pipe = INVALID_PIPE;
5398
ec5b01dd 5399 /* intel_dp vfuncs */
b6b5e383
DL
5400 if (INTEL_INFO(dev)->gen >= 9)
5401 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5402 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5403 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5404 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5405 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5406 else if (HAS_PCH_SPLIT(dev))
5407 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5408 else
5409 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5410
b9ca5fad
DL
5411 if (INTEL_INFO(dev)->gen >= 9)
5412 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5413 else
5414 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5415
0767935e
DV
5416 /* Preserve the current hw state. */
5417 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5418 intel_dp->attached_connector = intel_connector;
3d3dc149 5419
3b32a35b 5420 if (intel_dp_is_edp(dev, port))
b329530c 5421 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5422 else
5423 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5424
f7d24902
ID
5425 /*
5426 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5427 * for DP the encoder type can be set by the caller to
5428 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5429 */
5430 if (type == DRM_MODE_CONNECTOR_eDP)
5431 intel_encoder->type = INTEL_OUTPUT_EDP;
5432
c17ed5b5
VS
5433 /* eDP only on port B and/or C on vlv/chv */
5434 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5435 port != PORT_B && port != PORT_C))
5436 return false;
5437
e7281eab
ID
5438 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5439 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5440 port_name(port));
5441
b329530c 5442 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5443 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5444
a4fc5ed6
KP
5445 connector->interlace_allowed = true;
5446 connector->doublescan_allowed = 0;
5447
f0fec3f2 5448 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5449 edp_panel_vdd_work);
a4fc5ed6 5450
df0e9248 5451 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5452 drm_connector_register(connector);
a4fc5ed6 5453
affa9354 5454 if (HAS_DDI(dev))
bcbc889b
PZ
5455 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5456 else
5457 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5458 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5459
0b99836f 5460 /* Set up the hotplug pin. */
ab9d7c30
PZ
5461 switch (port) {
5462 case PORT_A:
1d843f9d 5463 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5464 break;
5465 case PORT_B:
1d843f9d 5466 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5467 break;
5468 case PORT_C:
1d843f9d 5469 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5470 break;
5471 case PORT_D:
1d843f9d 5472 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5473 break;
5474 default:
ad1c0b19 5475 BUG();
5eb08b69
ZW
5476 }
5477
dada1a9f 5478 if (is_edp(intel_dp)) {
773538e8 5479 pps_lock(intel_dp);
1e74a324
VS
5480 intel_dp_init_panel_power_timestamps(intel_dp);
5481 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5482 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5483 else
36b5f425 5484 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5485 pps_unlock(intel_dp);
dada1a9f 5486 }
0095e6dc 5487
9d1a1031 5488 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5489
0e32b39c 5490 /* init MST on ports that can support it */
c86ea3d0 5491 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5492 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5493 intel_dp_mst_encoder_init(intel_dig_port,
5494 intel_connector->base.base.id);
0e32b39c
DA
5495 }
5496 }
5497
36b5f425 5498 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5499 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5500 if (is_edp(intel_dp)) {
5501 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5502 /*
 5503		 * vdd might still be enabled due to the delayed vdd off.
5504 * Make sure vdd is actually turned off here.
5505 */
773538e8 5506 pps_lock(intel_dp);
4be73780 5507 edp_panel_vdd_off_sync(intel_dp);
773538e8 5508 pps_unlock(intel_dp);
15b1d171 5509 }
34ea3d38 5510 drm_connector_unregister(connector);
b2f246a8 5511 drm_connector_cleanup(connector);
16c25533 5512 return false;
b2f246a8 5513 }
32f9d658 5514
f684960e
CW
5515 intel_dp_add_properties(intel_dp, connector);
5516
a4fc5ed6
KP
5517 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5518 * 0xd. Failure to do so will result in spurious interrupts being
5519 * generated on the port when a cable is not attached.
5520 */
5521 if (IS_G4X(dev) && !IS_GM45(dev)) {
5522 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5523 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5524 }
16c25533
PZ
5525
5526 return true;
a4fc5ed6 5527}
f0fec3f2
PZ
5528
5529void
5530intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5531{
13cf5504 5532 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5533 struct intel_digital_port *intel_dig_port;
5534 struct intel_encoder *intel_encoder;
5535 struct drm_encoder *encoder;
5536 struct intel_connector *intel_connector;
5537
b14c5679 5538 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5539 if (!intel_dig_port)
5540 return;
5541
b14c5679 5542 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5543 if (!intel_connector) {
5544 kfree(intel_dig_port);
5545 return;
5546 }
5547
5548 intel_encoder = &intel_dig_port->base;
5549 encoder = &intel_encoder->base;
5550
5551 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5552 DRM_MODE_ENCODER_TMDS);
5553
5bfe2ac0 5554 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5555 intel_encoder->disable = intel_disable_dp;
00c09d70 5556 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5557 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5558 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5559 if (IS_CHERRYVIEW(dev)) {
9197c88b 5560 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5561 intel_encoder->pre_enable = chv_pre_enable_dp;
5562 intel_encoder->enable = vlv_enable_dp;
580d3811 5563 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5564 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5565 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5566 intel_encoder->pre_enable = vlv_pre_enable_dp;
5567 intel_encoder->enable = vlv_enable_dp;
49277c31 5568 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5569 } else {
ecff4f3b
JN
5570 intel_encoder->pre_enable = g4x_pre_enable_dp;
5571 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5572 if (INTEL_INFO(dev)->gen >= 5)
5573 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5574 }
f0fec3f2 5575
174edf1f 5576 intel_dig_port->port = port;
f0fec3f2
PZ
5577 intel_dig_port->dp.output_reg = output_reg;
5578
00c09d70 5579 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5580 if (IS_CHERRYVIEW(dev)) {
5581 if (port == PORT_D)
5582 intel_encoder->crtc_mask = 1 << 2;
5583 else
5584 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5585 } else {
5586 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5587 }
bc079e8b 5588 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5589 intel_encoder->hot_plug = intel_dp_hot_plug;
5590
13cf5504
DA
5591 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5592 dev_priv->hpd_irq_port[port] = intel_dig_port;
5593
15b1d171
PZ
5594 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5595 drm_encoder_cleanup(encoder);
5596 kfree(intel_dig_port);
b2f246a8 5597 kfree(intel_connector);
15b1d171 5598 }
f0fec3f2 5599}
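
/*
 * Editorial sketch (not part of the blamed file): intel_dp_init() is the
 * entry point used by the platform output setup code (intel_display.c),
 * roughly as below. The DP_B/DP_C register probes are assumptions based on
 * the g4x/VLV-era probe style and may differ per platform.
 */
static void example_setup_dp_ports(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(DP_B) & DP_DETECTED)
		intel_dp_init(dev, DP_B, PORT_B);
	if (I915_READ(DP_C) & DP_DETECTED)
		intel_dp_init(dev, DP_C, PORT_C);
}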
0e32b39c
DA
5600
5601void intel_dp_mst_suspend(struct drm_device *dev)
5602{
5603 struct drm_i915_private *dev_priv = dev->dev_private;
5604 int i;
5605
5606 /* disable MST */
5607 for (i = 0; i < I915_MAX_PORTS; i++) {
5608 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5609 if (!intel_dig_port)
5610 continue;
5611
5612 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5613 if (!intel_dig_port->dp.can_mst)
5614 continue;
5615 if (intel_dig_port->dp.is_mst)
5616 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5617 }
5618 }
5619}
5620
5621void intel_dp_mst_resume(struct drm_device *dev)
5622{
5623 struct drm_i915_private *dev_priv = dev->dev_private;
5624 int i;
5625
5626 for (i = 0; i < I915_MAX_PORTS; i++) {
5627 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5628 if (!intel_dig_port)
5629 continue;
5630 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5631 int ret;
5632
5633 if (!intel_dig_port->dp.can_mst)
5634 continue;
5635
5636 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5637 if (ret != 0) {
5638 intel_dp_check_mst_status(&intel_dig_port->dp);
5639 }
5640 }
5641 }
5642}
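
/*
 * Editorial sketch (not part of the blamed file): the MST suspend/resume
 * helpers above are expected to be called from the driver's system
 * suspend/resume handlers (i915_drv.c); the wrapper below is hypothetical.
 */
static void example_display_pm(struct drm_device *dev, bool suspend)
{
	if (suspend)
		intel_dp_mst_suspend(dev);	/* park the MST topology managers */
	else
		intel_dp_mst_resume(dev);	/* re-probe and resume MST state */
}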