]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/i915/intel_dp.c
drm/i915: Make debugfs/i915_gem_request more friendly
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
a8f3ef61 87/* Skylake supports following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
fe51bfb9
VS
90static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
f4896f15 93static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 94
cfcb0fc9
JB
95/**
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
98 *
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
101 */
102static bool is_edp(struct intel_dp *intel_dp)
103{
da63a9f2
PZ
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
107}
108
68b4d824 109static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 110{
68b4d824
ID
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
114}
115
df0e9248
CW
116static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117{
fa90ecef 118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
119}
120
ea5b213a 121static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 122static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 123static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 124static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
125static void vlv_steal_power_sequencer(struct drm_device *dev,
126 enum pipe pipe);
a4fc5ed6 127
ed4e9c1d
VS
128static int
129intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 130{
7183dc29 131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
132
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_2_7:
1db10e28 136 case DP_LINK_BW_5_4:
d4eead50 137 break;
a4fc5ed6 138 default:
d4eead50
ID
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140 max_link_bw);
a4fc5ed6
KP
141 max_link_bw = DP_LINK_BW_1_62;
142 break;
143 }
144 return max_link_bw;
145}
146
eeb6324d
PZ
147static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148{
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
152
153 source_max = 4;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156 source_max = 2;
157
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160 return min(source_max, sink_max);
161}
162
cd9dde44
AJ
163/*
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
166 *
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168 *
169 * 270000 * 1 * 8 / 10 == 216000
170 *
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
175 *
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
178 */
179
a4fc5ed6 180static int
c898261c 181intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 182{
cd9dde44 183 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
184}
185
fe27d53e
DA
186static int
187intel_dp_max_data_rate(int max_link_clock, int max_lanes)
188{
189 return (max_link_clock * max_lanes * 8) / 10;
190}
191
c19de8eb 192static enum drm_mode_status
a4fc5ed6
KP
193intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195{
df0e9248 196 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 201
dd06f90e
JN
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
204 return MODE_PANEL;
205
dd06f90e 206 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 207 return MODE_PANEL;
03afc4a2
DV
208
209 target_clock = fixed_mode->clock;
7de56f43
ZY
210 }
211
50fec21a 212 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 213 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
214
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
217
218 if (mode_rate > max_rate)
c4867936 219 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
220
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
223
0af78a2b
DV
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
226
a4fc5ed6
KP
227 return MODE_OK;
228}
229
a4f1289e 230uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
231{
232 int i;
233 uint32_t v = 0;
234
235 if (src_bytes > 4)
236 src_bytes = 4;
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239 return v;
240}
241
c2af70e2 242static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
243{
244 int i;
245 if (dst_bytes > 4)
246 dst_bytes = 4;
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
249}
250
fb0f8fbf
KP
251/* hrawclock is 1/4 the FSB frequency */
252static int
253intel_hrawclk(struct drm_device *dev)
254{
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 uint32_t clkcfg;
257
9473c8f4
VP
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
260 return 200;
261
fb0f8fbf
KP
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
264 case CLKCFG_FSB_400:
265 return 100;
266 case CLKCFG_FSB_533:
267 return 133;
268 case CLKCFG_FSB_667:
269 return 166;
270 case CLKCFG_FSB_800:
271 return 200;
272 case CLKCFG_FSB_1067:
273 return 266;
274 case CLKCFG_FSB_1333:
275 return 333;
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
279 return 400;
280 default:
281 return 133;
282 }
283}
284
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b
JN
288static void
289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 290 struct intel_dp *intel_dp);
bf13e81b 291
773538e8
VS
292static void pps_lock(struct intel_dp *intel_dp)
293{
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
299
300 /*
301 * See vlv_power_sequencer_reset() why we need
302 * a power domain reference here.
303 */
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
306
307 mutex_lock(&dev_priv->pps_mutex);
308}
309
310static void pps_unlock(struct intel_dp *intel_dp)
311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
317
318 mutex_unlock(&dev_priv->pps_mutex);
319
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
322}
323
961a0db0
VS
324static void
325vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326{
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 331 bool pll_enabled;
961a0db0
VS
332 uint32_t DP;
333
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
337 return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
d288f65f
VS
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357 /*
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable temporarily it if it's not already enabled.
360 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
961a0db0
VS
365 /*
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make this power seqeuencer lock onto the port.
369 * Otherwise even VDD force bit won't work.
370 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
961a0db0
VS
382}
383
bf13e81b
JN
384static enum pipe
385vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
386{
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 392 enum pipe pipe;
bf13e81b 393
e39b999a 394 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 395
a8c3344e
VS
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
a4a5d2f8
VS
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
401
402 /*
403 * We don't have power sequencer currently.
404 * Pick one that's not used by other ports.
405 */
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
407 base.head) {
408 struct intel_dp *tmp;
409
410 if (encoder->type != INTEL_OUTPUT_EDP)
411 continue;
412
413 tmp = enc_to_intel_dp(&encoder->base);
414
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
417 }
418
419 /*
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
422 */
423 if (WARN_ON(pipes == 0))
a8c3344e
VS
424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
a4a5d2f8 427
a8c3344e
VS
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
430
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
434
435 /* init power sequencer on this pipe and port */
36b5f425
VS
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 438
961a0db0
VS
439 /*
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
444
445 return intel_dp->pps_pipe;
446}
447
6491ab27
VS
448typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
449 enum pipe pipe);
450
451static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
452 enum pipe pipe)
453{
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
455}
456
457static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
461}
462
463static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return true;
467}
bf13e81b 468
a4a5d2f8 469static enum pipe
6491ab27
VS
470vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471 enum port port,
472 vlv_pipe_check pipe_check)
a4a5d2f8
VS
473{
474 enum pipe pipe;
bf13e81b 475
bf13e81b
JN
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
479
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481 continue;
482
6491ab27
VS
483 if (!pipe_check(dev_priv, pipe))
484 continue;
485
a4a5d2f8 486 return pipe;
bf13e81b
JN
487 }
488
a4a5d2f8
VS
489 return INVALID_PIPE;
490}
491
492static void
493vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
494{
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
498 enum port port = intel_dig_port->port;
499
500 lockdep_assert_held(&dev_priv->pps_mutex);
501
502 /* try to find a pipe with this port selected */
6491ab27
VS
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
505 vlv_pipe_has_pp_on);
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_any);
a4a5d2f8
VS
514
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
518 port_name(port));
519 return;
bf13e81b
JN
520 }
521
a4a5d2f8
VS
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
524
36b5f425
VS
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
527}
528
773538e8
VS
529void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
530{
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
533
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
535 return;
536
537 /*
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so one
544 * should use them always.
545 */
546
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
549
550 if (encoder->type != INTEL_OUTPUT_EDP)
551 continue;
552
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
555 }
bf13e81b
JN
556}
557
558static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559{
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
564 else
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566}
567
568static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569{
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
574 else
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576}
577
01527b31
CT
578/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
579 This function only applicable when panel PM state is not to be tracked */
580static int edp_notify_handler(struct notifier_block *this, unsigned long code,
581 void *unused)
582{
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
584 edp_notifier);
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 u32 pp_div;
588 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
589
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
591 return 0;
592
773538e8 593 pps_lock(intel_dp);
e39b999a 594
01527b31 595 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
597
01527b31
CT
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
602
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
607 }
608
773538e8 609 pps_unlock(intel_dp);
e39b999a 610
01527b31
CT
611 return 0;
612}
613
4be73780 614static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 615{
30add22d 616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
e39b999a
VS
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
9a42356b
VS
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
bf13e81b 625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
626}
627
4be73780 628static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 629{
30add22d 630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
631 struct drm_i915_private *dev_priv = dev->dev_private;
632
e39b999a
VS
633 lockdep_assert_held(&dev_priv->pps_mutex);
634
9a42356b
VS
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
773538e8 639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
640}
641
9b984dae
KP
642static void
643intel_dp_check_edp(struct intel_dp *intel_dp)
644{
30add22d 645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 646 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 647
9b984dae
KP
648 if (!is_edp(intel_dp))
649 return;
453c5420 650
4be73780 651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
656 }
657}
658
9ee32fea
DV
659static uint32_t
660intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
661{
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
666 uint32_t status;
667 bool done;
668
ef04f00d 669#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 670 if (has_aux_irq)
b18ac466 671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 672 msecs_to_jiffies_timeout(10));
9ee32fea
DV
673 else
674 done = wait_for_atomic(C, 10) == 0;
675 if (!done)
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
677 has_aux_irq);
678#undef C
679
680 return status;
681}
682
ec5b01dd 683static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 684{
174edf1f
PZ
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 687
ec5b01dd
DL
688 /*
689 * The clock divider is based off the hrawclk, and would like to run at
690 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 691 */
ec5b01dd
DL
692 return index ? 0 : intel_hrawclk(dev) / 2;
693}
694
695static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696{
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 699 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
700
701 if (index)
702 return 0;
703
704 if (intel_dig_port->port == PORT_A) {
469d4b2a 705 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
706 } else {
707 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
708 }
709}
710
711static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
712{
713 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
714 struct drm_device *dev = intel_dig_port->base.base.dev;
715 struct drm_i915_private *dev_priv = dev->dev_private;
716
717 if (intel_dig_port->port == PORT_A) {
718 if (index)
719 return 0;
1652d19e 720 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
721 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
722 /* Workaround for non-ULT HSW */
bc86625a
CW
723 switch (index) {
724 case 0: return 63;
725 case 1: return 72;
726 default: return 0;
727 }
ec5b01dd 728 } else {
bc86625a 729 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 730 }
b84a1cf8
RV
731}
732
ec5b01dd
DL
733static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
734{
735 return index ? 0 : 100;
736}
737
b6b5e383
DL
738static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
739{
740 /*
741 * SKL doesn't need us to program the AUX clock divider (Hardware will
742 * derive the clock from CDCLK automatically). We still implement the
743 * get_aux_clock_divider vfunc to plug-in into the existing code.
744 */
745 return index ? 0 : 1;
746}
747
5ed12a19
DL
748static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
749 bool has_aux_irq,
750 int send_bytes,
751 uint32_t aux_clock_divider)
752{
753 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
754 struct drm_device *dev = intel_dig_port->base.base.dev;
755 uint32_t precharge, timeout;
756
757 if (IS_GEN6(dev))
758 precharge = 3;
759 else
760 precharge = 5;
761
762 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
763 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
764 else
765 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
766
767 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 768 DP_AUX_CH_CTL_DONE |
5ed12a19 769 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 770 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 771 timeout |
788d4433 772 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
773 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
774 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 775 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
776}
777
b9ca5fad
DL
778static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
779 bool has_aux_irq,
780 int send_bytes,
781 uint32_t unused)
782{
783 return DP_AUX_CH_CTL_SEND_BUSY |
784 DP_AUX_CH_CTL_DONE |
785 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
786 DP_AUX_CH_CTL_TIME_OUT_ERROR |
787 DP_AUX_CH_CTL_TIME_OUT_1600us |
788 DP_AUX_CH_CTL_RECEIVE_ERROR |
789 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
790 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
791}
792
b84a1cf8
RV
793static int
794intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 795 const uint8_t *send, int send_bytes,
b84a1cf8
RV
796 uint8_t *recv, int recv_size)
797{
798 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
799 struct drm_device *dev = intel_dig_port->base.base.dev;
800 struct drm_i915_private *dev_priv = dev->dev_private;
801 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
802 uint32_t ch_data = ch_ctl + 4;
bc86625a 803 uint32_t aux_clock_divider;
b84a1cf8
RV
804 int i, ret, recv_bytes;
805 uint32_t status;
5ed12a19 806 int try, clock = 0;
4e6b788c 807 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
808 bool vdd;
809
773538e8 810 pps_lock(intel_dp);
e39b999a 811
72c3500a
VS
812 /*
813 * We will be called with VDD already enabled for dpcd/edid/oui reads.
814 * In such cases we want to leave VDD enabled and it's up to upper layers
815 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
816 * ourselves.
817 */
1e0560e0 818 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
819
820 /* dp aux is extremely sensitive to irq latency, hence request the
821 * lowest possible wakeup latency and so prevent the cpu from going into
822 * deep sleep states.
823 */
824 pm_qos_update_request(&dev_priv->pm_qos, 0);
825
826 intel_dp_check_edp(intel_dp);
5eb08b69 827
c67a470b
PZ
828 intel_aux_display_runtime_get(dev_priv);
829
11bee43e
JB
830 /* Try to wait for any previous AUX channel activity */
831 for (try = 0; try < 3; try++) {
ef04f00d 832 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
833 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
834 break;
835 msleep(1);
836 }
837
838 if (try == 3) {
839 WARN(1, "dp_aux_ch not started status 0x%08x\n",
840 I915_READ(ch_ctl));
9ee32fea
DV
841 ret = -EBUSY;
842 goto out;
4f7f7b7e
CW
843 }
844
46a5ae9f
PZ
845 /* Only 5 data registers! */
846 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
847 ret = -E2BIG;
848 goto out;
849 }
850
ec5b01dd 851 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
852 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
853 has_aux_irq,
854 send_bytes,
855 aux_clock_divider);
5ed12a19 856
bc86625a
CW
857 /* Must try at least 3 times according to DP spec */
858 for (try = 0; try < 5; try++) {
859 /* Load the send data into the aux channel data registers */
860 for (i = 0; i < send_bytes; i += 4)
861 I915_WRITE(ch_data + i,
a4f1289e
RV
862 intel_dp_pack_aux(send + i,
863 send_bytes - i));
bc86625a
CW
864
865 /* Send the command and wait for it to complete */
5ed12a19 866 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
867
868 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
869
870 /* Clear done status and any errors */
871 I915_WRITE(ch_ctl,
872 status |
873 DP_AUX_CH_CTL_DONE |
874 DP_AUX_CH_CTL_TIME_OUT_ERROR |
875 DP_AUX_CH_CTL_RECEIVE_ERROR);
876
877 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
878 DP_AUX_CH_CTL_RECEIVE_ERROR))
879 continue;
880 if (status & DP_AUX_CH_CTL_DONE)
881 break;
882 }
4f7f7b7e 883 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
884 break;
885 }
886
a4fc5ed6 887 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 888 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
889 ret = -EBUSY;
890 goto out;
a4fc5ed6
KP
891 }
892
893 /* Check for timeout or receive error.
894 * Timeouts occur when the sink is not connected
895 */
a5b3da54 896 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 897 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
898 ret = -EIO;
899 goto out;
a5b3da54 900 }
1ae8c0a5
KP
901
902 /* Timeouts occur when the device isn't connected, so they're
903 * "normal" -- don't fill the kernel log with these */
a5b3da54 904 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 905 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
906 ret = -ETIMEDOUT;
907 goto out;
a4fc5ed6
KP
908 }
909
910 /* Unload any bytes sent back from the other side */
911 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
912 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
913 if (recv_bytes > recv_size)
914 recv_bytes = recv_size;
0206e353 915
4f7f7b7e 916 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
917 intel_dp_unpack_aux(I915_READ(ch_data + i),
918 recv + i, recv_bytes - i);
a4fc5ed6 919
9ee32fea
DV
920 ret = recv_bytes;
921out:
922 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 923 intel_aux_display_runtime_put(dev_priv);
9ee32fea 924
884f19e9
JN
925 if (vdd)
926 edp_panel_vdd_off(intel_dp, false);
927
773538e8 928 pps_unlock(intel_dp);
e39b999a 929
9ee32fea 930 return ret;
a4fc5ed6
KP
931}
932
a6c8aff0
JN
933#define BARE_ADDRESS_SIZE 3
934#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
935static ssize_t
936intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 937{
9d1a1031
JN
938 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
939 uint8_t txbuf[20], rxbuf[20];
940 size_t txsize, rxsize;
a4fc5ed6 941 int ret;
a4fc5ed6 942
d2d9cbbd
VS
943 txbuf[0] = (msg->request << 4) |
944 ((msg->address >> 16) & 0xf);
945 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
946 txbuf[2] = msg->address & 0xff;
947 txbuf[3] = msg->size - 1;
46a5ae9f 948
9d1a1031
JN
949 switch (msg->request & ~DP_AUX_I2C_MOT) {
950 case DP_AUX_NATIVE_WRITE:
951 case DP_AUX_I2C_WRITE:
a6c8aff0 952 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 953 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 954
9d1a1031
JN
955 if (WARN_ON(txsize > 20))
956 return -E2BIG;
a4fc5ed6 957
9d1a1031 958 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 959
9d1a1031
JN
960 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
961 if (ret > 0) {
962 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 963
a1ddefd8
JN
964 if (ret > 1) {
965 /* Number of bytes written in a short write. */
966 ret = clamp_t(int, rxbuf[1], 0, msg->size);
967 } else {
968 /* Return payload size. */
969 ret = msg->size;
970 }
9d1a1031
JN
971 }
972 break;
46a5ae9f 973
9d1a1031
JN
974 case DP_AUX_NATIVE_READ:
975 case DP_AUX_I2C_READ:
a6c8aff0 976 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 977 rxsize = msg->size + 1;
a4fc5ed6 978
9d1a1031
JN
979 if (WARN_ON(rxsize > 20))
980 return -E2BIG;
a4fc5ed6 981
9d1a1031
JN
982 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
983 if (ret > 0) {
984 msg->reply = rxbuf[0] >> 4;
985 /*
986 * Assume happy day, and copy the data. The caller is
987 * expected to check msg->reply before touching it.
988 *
989 * Return payload size.
990 */
991 ret--;
992 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 993 }
9d1a1031
JN
994 break;
995
996 default:
997 ret = -EINVAL;
998 break;
a4fc5ed6 999 }
f51a44b9 1000
9d1a1031 1001 return ret;
a4fc5ed6
KP
1002}
1003
9d1a1031
JN
1004static void
1005intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1006{
1007 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1008 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1009 enum port port = intel_dig_port->port;
0b99836f 1010 const char *name = NULL;
ab2c0672
DA
1011 int ret;
1012
33ad6626
JN
1013 switch (port) {
1014 case PORT_A:
1015 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1016 name = "DPDDC-A";
ab2c0672 1017 break;
33ad6626
JN
1018 case PORT_B:
1019 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1020 name = "DPDDC-B";
ab2c0672 1021 break;
33ad6626
JN
1022 case PORT_C:
1023 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1024 name = "DPDDC-C";
ab2c0672 1025 break;
33ad6626
JN
1026 case PORT_D:
1027 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1028 name = "DPDDC-D";
33ad6626
JN
1029 break;
1030 default:
1031 BUG();
ab2c0672
DA
1032 }
1033
1b1aad75
DL
1034 /*
1035 * The AUX_CTL register is usually DP_CTL + 0x10.
1036 *
1037 * On Haswell and Broadwell though:
1038 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1039 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1040 *
1041 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1042 */
1043 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1044 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1045
0b99836f 1046 intel_dp->aux.name = name;
9d1a1031
JN
1047 intel_dp->aux.dev = dev->dev;
1048 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1049
0b99836f
JN
1050 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1051 connector->base.kdev->kobj.name);
8316f337 1052
4f71d0cb 1053 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1054 if (ret < 0) {
4f71d0cb 1055 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1056 name, ret);
1057 return;
ab2c0672 1058 }
8a5e6aeb 1059
0b99836f
JN
1060 ret = sysfs_create_link(&connector->base.kdev->kobj,
1061 &intel_dp->aux.ddc.dev.kobj,
1062 intel_dp->aux.ddc.dev.kobj.name);
1063 if (ret < 0) {
1064 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1065 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1066 }
a4fc5ed6
KP
1067}
1068
80f65de3
ID
1069static void
1070intel_dp_connector_unregister(struct intel_connector *intel_connector)
1071{
1072 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1073
0e32b39c
DA
1074 if (!intel_connector->mst_port)
1075 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1076 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1077 intel_connector_unregister(intel_connector);
1078}
1079
5416d871 1080static void
c3346ef6 1081skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1082{
1083 u32 ctrl1;
1084
1085 pipe_config->ddi_pll_sel = SKL_DPLL0;
1086 pipe_config->dpll_hw_state.cfgcr1 = 0;
1087 pipe_config->dpll_hw_state.cfgcr2 = 0;
1088
1089 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1090 switch (link_clock / 2) {
1091 case 81000:
5416d871
DL
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1093 SKL_DPLL0);
1094 break;
c3346ef6 1095 case 135000:
5416d871
DL
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1097 SKL_DPLL0);
1098 break;
c3346ef6 1099 case 270000:
5416d871
DL
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1101 SKL_DPLL0);
1102 break;
c3346ef6
SJ
1103 case 162000:
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1105 SKL_DPLL0);
1106 break;
1107 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1108 results in CDCLK change. Need to handle the change of CDCLK by
1109 disabling pipes and re-enabling them */
1110 case 108000:
1111 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1112 SKL_DPLL0);
1113 break;
1114 case 216000:
1115 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1116 SKL_DPLL0);
1117 break;
1118
5416d871
DL
1119 }
1120 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1121}
1122
0e50338c 1123static void
5cec258b 1124hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1125{
1126 switch (link_bw) {
1127 case DP_LINK_BW_1_62:
1128 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1129 break;
1130 case DP_LINK_BW_2_7:
1131 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1132 break;
1133 case DP_LINK_BW_5_4:
1134 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1135 break;
1136 }
1137}
1138
fc0f8e25 1139static int
12f6a2e2 1140intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1141{
94ca719e
VS
1142 if (intel_dp->num_sink_rates) {
1143 *sink_rates = intel_dp->sink_rates;
1144 return intel_dp->num_sink_rates;
fc0f8e25 1145 }
12f6a2e2
VS
1146
1147 *sink_rates = default_rates;
1148
1149 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1150}
1151
a8f3ef61 1152static int
1db10e28 1153intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1154{
636280ba
VS
1155 if (INTEL_INFO(dev)->gen >= 9) {
1156 *source_rates = gen9_rates;
1157 return ARRAY_SIZE(gen9_rates);
fe51bfb9
VS
1158 } else if (IS_CHERRYVIEW(dev)) {
1159 *source_rates = chv_rates;
1160 return ARRAY_SIZE(chv_rates);
a8f3ef61 1161 }
636280ba
VS
1162
1163 *source_rates = default_rates;
1164
1db10e28
VS
1165 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1166 /* WaDisableHBR2:skl */
1167 return (DP_LINK_BW_2_7 >> 3) + 1;
1168 else if (INTEL_INFO(dev)->gen >= 8 ||
1169 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1170 return (DP_LINK_BW_5_4 >> 3) + 1;
1171 else
1172 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1173}
1174
c6bb3538
DV
1175static void
1176intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1177 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1178{
1179 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1180 const struct dp_link_dpll *divisor = NULL;
1181 int i, count = 0;
c6bb3538
DV
1182
1183 if (IS_G4X(dev)) {
9dd4ffdf
CML
1184 divisor = gen4_dpll;
1185 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1186 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1187 divisor = pch_dpll;
1188 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1189 } else if (IS_CHERRYVIEW(dev)) {
1190 divisor = chv_dpll;
1191 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1192 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1193 divisor = vlv_dpll;
1194 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1195 }
9dd4ffdf
CML
1196
1197 if (divisor && count) {
1198 for (i = 0; i < count; i++) {
1199 if (link_bw == divisor[i].link_bw) {
1200 pipe_config->dpll = divisor[i].dpll;
1201 pipe_config->clock_set = true;
1202 break;
1203 }
1204 }
c6bb3538
DV
1205 }
1206}
1207
2ecae76a
VS
1208static int intersect_rates(const int *source_rates, int source_len,
1209 const int *sink_rates, int sink_len,
94ca719e 1210 int *common_rates)
a8f3ef61
SJ
1211{
1212 int i = 0, j = 0, k = 0;
1213
a8f3ef61
SJ
1214 while (i < source_len && j < sink_len) {
1215 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1216 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1217 return k;
94ca719e 1218 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1219 ++k;
1220 ++i;
1221 ++j;
1222 } else if (source_rates[i] < sink_rates[j]) {
1223 ++i;
1224 } else {
1225 ++j;
1226 }
1227 }
1228 return k;
1229}
1230
94ca719e
VS
1231static int intel_dp_common_rates(struct intel_dp *intel_dp,
1232 int *common_rates)
2ecae76a
VS
1233{
1234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1235 const int *source_rates, *sink_rates;
1236 int source_len, sink_len;
1237
1238 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1239 source_len = intel_dp_source_rates(dev, &source_rates);
1240
1241 return intersect_rates(source_rates, source_len,
1242 sink_rates, sink_len,
94ca719e 1243 common_rates);
2ecae76a
VS
1244}
1245
0336400e
VS
1246static void snprintf_int_array(char *str, size_t len,
1247 const int *array, int nelem)
1248{
1249 int i;
1250
1251 str[0] = '\0';
1252
1253 for (i = 0; i < nelem; i++) {
1254 int r = snprintf(str, len, "%d,", array[i]);
1255 if (r >= len)
1256 return;
1257 str += r;
1258 len -= r;
1259 }
1260}
1261
1262static void intel_dp_print_rates(struct intel_dp *intel_dp)
1263{
1264 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1265 const int *source_rates, *sink_rates;
94ca719e
VS
1266 int source_len, sink_len, common_len;
1267 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1268 char str[128]; /* FIXME: too big for stack? */
1269
1270 if ((drm_debug & DRM_UT_KMS) == 0)
1271 return;
1272
1273 source_len = intel_dp_source_rates(dev, &source_rates);
1274 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1275 DRM_DEBUG_KMS("source rates: %s\n", str);
1276
1277 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1278 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1279 DRM_DEBUG_KMS("sink rates: %s\n", str);
1280
94ca719e
VS
1281 common_len = intel_dp_common_rates(intel_dp, common_rates);
1282 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1283 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1284}
1285
f4896f15 1286static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1287{
1288 int i = 0;
1289
1290 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1291 if (find == rates[i])
1292 break;
1293
1294 return i;
1295}
1296
50fec21a
VS
1297int
1298intel_dp_max_link_rate(struct intel_dp *intel_dp)
1299{
1300 int rates[DP_MAX_SUPPORTED_RATES] = {};
1301 int len;
1302
94ca719e 1303 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1304 if (WARN_ON(len <= 0))
1305 return 162000;
1306
1307 return rates[rate_to_index(0, rates) - 1];
1308}
1309
ed4e9c1d
VS
1310int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1311{
94ca719e 1312 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1313}
1314
00c09d70 1315bool
5bfe2ac0 1316intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1317 struct intel_crtc_state *pipe_config)
a4fc5ed6 1318{
5bfe2ac0 1319 struct drm_device *dev = encoder->base.dev;
36008365 1320 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1321 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1323 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1324 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1325 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1326 int lane_count, clock;
56071a20 1327 int min_lane_count = 1;
eeb6324d 1328 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1329 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1330 int min_clock = 0;
a8f3ef61 1331 int max_clock;
083f9560 1332 int bpp, mode_rate;
ff9a6750 1333 int link_avail, link_clock;
94ca719e
VS
1334 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1335 int common_len;
a8f3ef61 1336
94ca719e 1337 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1338
1339 /* No common link rates between source and sink */
94ca719e 1340 WARN_ON(common_len <= 0);
a8f3ef61 1341
94ca719e 1342 max_clock = common_len - 1;
a4fc5ed6 1343
bc7d38a4 1344 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1345 pipe_config->has_pch_encoder = true;
1346
03afc4a2 1347 pipe_config->has_dp_encoder = true;
f769cd24 1348 pipe_config->has_drrs = false;
9ed109a7 1349 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1350
dd06f90e
JN
1351 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1352 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1353 adjusted_mode);
2dd24552
JB
1354 if (!HAS_PCH_SPLIT(dev))
1355 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1356 intel_connector->panel.fitting_mode);
1357 else
b074cec8
JB
1358 intel_pch_panel_fitting(intel_crtc, pipe_config,
1359 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1360 }
1361
cb1793ce 1362 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1363 return false;
1364
083f9560 1365 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1366 "max bw %d pixel clock %iKHz\n",
94ca719e 1367 max_lane_count, common_rates[max_clock],
241bfc38 1368 adjusted_mode->crtc_clock);
083f9560 1369
36008365
DV
1370 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1371 * bpc in between. */
3e7ca985 1372 bpp = pipe_config->pipe_bpp;
56071a20
JN
1373 if (is_edp(intel_dp)) {
1374 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1375 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1376 dev_priv->vbt.edp_bpp);
1377 bpp = dev_priv->vbt.edp_bpp;
1378 }
1379
344c5bbc
JN
1380 /*
1381 * Use the maximum clock and number of lanes the eDP panel
1382 * advertizes being capable of. The panels are generally
1383 * designed to support only a single clock and lane
1384 * configuration, and typically these values correspond to the
1385 * native resolution of the panel.
1386 */
1387 min_lane_count = max_lane_count;
1388 min_clock = max_clock;
7984211e 1389 }
657445fe 1390
36008365 1391 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1392 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1393 bpp);
36008365 1394
c6930992 1395 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1396 for (lane_count = min_lane_count;
1397 lane_count <= max_lane_count;
1398 lane_count <<= 1) {
1399
94ca719e 1400 link_clock = common_rates[clock];
36008365
DV
1401 link_avail = intel_dp_max_data_rate(link_clock,
1402 lane_count);
1403
1404 if (mode_rate <= link_avail) {
1405 goto found;
1406 }
1407 }
1408 }
1409 }
c4867936 1410
36008365 1411 return false;
3685a8f3 1412
36008365 1413found:
55bc60db
VS
1414 if (intel_dp->color_range_auto) {
1415 /*
1416 * See:
1417 * CEA-861-E - 5.1 Default Encoding Parameters
1418 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1419 */
18316c8c 1420 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1421 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1422 else
1423 intel_dp->color_range = 0;
1424 }
1425
3685a8f3 1426 if (intel_dp->color_range)
50f3b016 1427 pipe_config->limited_color_range = true;
a4fc5ed6 1428
36008365 1429 intel_dp->lane_count = lane_count;
a8f3ef61 1430
94ca719e 1431 if (intel_dp->num_sink_rates) {
bc27b7d3 1432 intel_dp->link_bw = 0;
a8f3ef61 1433 intel_dp->rate_select =
94ca719e 1434 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1435 } else {
1436 intel_dp->link_bw =
94ca719e 1437 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1438 intel_dp->rate_select = 0;
a8f3ef61
SJ
1439 }
1440
657445fe 1441 pipe_config->pipe_bpp = bpp;
94ca719e 1442 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1443
36008365
DV
1444 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1445 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1446 pipe_config->port_clock, bpp);
36008365
DV
1447 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1448 mode_rate, link_avail);
a4fc5ed6 1449
03afc4a2 1450 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1451 adjusted_mode->crtc_clock,
1452 pipe_config->port_clock,
03afc4a2 1453 &pipe_config->dp_m_n);
9d1a455b 1454
439d7ac0 1455 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1456 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1457 pipe_config->has_drrs = true;
439d7ac0
PB
1458 intel_link_compute_m_n(bpp, lane_count,
1459 intel_connector->panel.downclock_mode->clock,
1460 pipe_config->port_clock,
1461 &pipe_config->dp_m2_n2);
1462 }
1463
5416d871 1464 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1465 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
5416d871 1466 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1467 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1468 else
1469 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1470
03afc4a2 1471 return true;
a4fc5ed6
KP
1472}
1473
7c62a164 1474static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1475{
7c62a164
DV
1476 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1477 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1478 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1479 struct drm_i915_private *dev_priv = dev->dev_private;
1480 u32 dpa_ctl;
1481
6e3c9717
ACO
1482 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1483 crtc->config->port_clock);
ea9b6006
DV
1484 dpa_ctl = I915_READ(DP_A);
1485 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1486
6e3c9717 1487 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1488 /* For a long time we've carried around a ILK-DevA w/a for the
1489 * 160MHz clock. If we're really unlucky, it's still required.
1490 */
1491 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1492 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1493 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1494 } else {
1495 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1496 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1497 }
1ce17038 1498
ea9b6006
DV
1499 I915_WRITE(DP_A, dpa_ctl);
1500
1501 POSTING_READ(DP_A);
1502 udelay(500);
1503}
1504
8ac33ed3 1505static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1506{
b934223d 1507 struct drm_device *dev = encoder->base.dev;
417e822d 1508 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1510 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1511 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1512 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1513
417e822d 1514 /*
1a2eb460 1515 * There are four kinds of DP registers:
417e822d
KP
1516 *
1517 * IBX PCH
1a2eb460
KP
1518 * SNB CPU
1519 * IVB CPU
417e822d
KP
1520 * CPT PCH
1521 *
1522 * IBX PCH and CPU are the same for almost everything,
1523 * except that the CPU DP PLL is configured in this
1524 * register
1525 *
1526 * CPT PCH is quite different, having many bits moved
1527 * to the TRANS_DP_CTL register instead. That
1528 * configuration happens (oddly) in ironlake_pch_enable
1529 */
9c9e7927 1530
417e822d
KP
1531 /* Preserve the BIOS-computed detected bit. This is
1532 * supposed to be read-only.
1533 */
1534 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1535
417e822d 1536 /* Handle DP bits in common between all three register formats */
417e822d 1537 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1538 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1539
6e3c9717 1540 if (crtc->config->has_audio)
ea5b213a 1541 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1542
417e822d 1543 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1544
bc7d38a4 1545 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1546 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1547 intel_dp->DP |= DP_SYNC_HS_HIGH;
1548 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1549 intel_dp->DP |= DP_SYNC_VS_HIGH;
1550 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1551
6aba5b6c 1552 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1553 intel_dp->DP |= DP_ENHANCED_FRAMING;
1554
7c62a164 1555 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1556 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1557 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1558 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1559
1560 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1561 intel_dp->DP |= DP_SYNC_HS_HIGH;
1562 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1563 intel_dp->DP |= DP_SYNC_VS_HIGH;
1564 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1565
6aba5b6c 1566 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1567 intel_dp->DP |= DP_ENHANCED_FRAMING;
1568
44f37d1f
CML
1569 if (!IS_CHERRYVIEW(dev)) {
1570 if (crtc->pipe == 1)
1571 intel_dp->DP |= DP_PIPEB_SELECT;
1572 } else {
1573 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1574 }
417e822d
KP
1575 } else {
1576 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1577 }
a4fc5ed6
KP
1578}
1579
ffd6749d
PZ
1580#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1581#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1582
1a5ef5b7
PZ
1583#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1584#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1585
ffd6749d
PZ
1586#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1587#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1588
4be73780 1589static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1590 u32 mask,
1591 u32 value)
bd943159 1592{
30add22d 1593 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1594 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1595 u32 pp_stat_reg, pp_ctrl_reg;
1596
e39b999a
VS
1597 lockdep_assert_held(&dev_priv->pps_mutex);
1598
bf13e81b
JN
1599 pp_stat_reg = _pp_stat_reg(intel_dp);
1600 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1601
99ea7127 1602 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1603 mask, value,
1604 I915_READ(pp_stat_reg),
1605 I915_READ(pp_ctrl_reg));
32ce697c 1606
453c5420 1607 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1608 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1609 I915_READ(pp_stat_reg),
1610 I915_READ(pp_ctrl_reg));
32ce697c 1611 }
54c136d4
CW
1612
1613 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1614}
32ce697c 1615
4be73780 1616static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1617{
1618 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1619 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1620}
1621
4be73780 1622static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1623{
1624 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1625 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1626}
1627
4be73780 1628static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1629{
1630 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1631
1632 /* When we disable the VDD override bit last we have to do the manual
1633 * wait. */
1634 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1635 intel_dp->panel_power_cycle_delay);
1636
4be73780 1637 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1638}
1639
4be73780 1640static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1641{
1642 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1643 intel_dp->backlight_on_delay);
1644}
1645
4be73780 1646static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1647{
1648 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1649 intel_dp->backlight_off_delay);
1650}
99ea7127 1651
832dd3c1
KP
1652/* Read the current pp_control value, unlocking the register if it
1653 * is locked
1654 */
1655
453c5420 1656static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1657{
453c5420
JB
1658 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1659 struct drm_i915_private *dev_priv = dev->dev_private;
1660 u32 control;
832dd3c1 1661
e39b999a
VS
1662 lockdep_assert_held(&dev_priv->pps_mutex);
1663
bf13e81b 1664 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1665 control &= ~PANEL_UNLOCK_MASK;
1666 control |= PANEL_UNLOCK_REGS;
1667 return control;
bd943159
KP
1668}
1669
951468f3
VS
1670/*
1671 * Must be paired with edp_panel_vdd_off().
1672 * Must hold pps_mutex around the whole on/off sequence.
1673 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1674 */
1e0560e0 1675static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1676{
30add22d 1677 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1678 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1679 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1680 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1681 enum intel_display_power_domain power_domain;
5d613501 1682 u32 pp;
453c5420 1683 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1684 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1685
e39b999a
VS
1686 lockdep_assert_held(&dev_priv->pps_mutex);
1687
97af61f5 1688 if (!is_edp(intel_dp))
adddaaf4 1689 return false;
bd943159 1690
2c623c11 1691 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1692 intel_dp->want_panel_vdd = true;
99ea7127 1693
4be73780 1694 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1695 return need_to_disable;
b0665d57 1696
4e6e1a54
ID
1697 power_domain = intel_display_port_power_domain(intel_encoder);
1698 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1699
3936fcf4
VS
1700 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1701 port_name(intel_dig_port->port));
bd943159 1702
4be73780
DV
1703 if (!edp_have_panel_power(intel_dp))
1704 wait_panel_power_cycle(intel_dp);
99ea7127 1705
453c5420 1706 pp = ironlake_get_pp_control(intel_dp);
5d613501 1707 pp |= EDP_FORCE_VDD;
ebf33b18 1708
bf13e81b
JN
1709 pp_stat_reg = _pp_stat_reg(intel_dp);
1710 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1711
1712 I915_WRITE(pp_ctrl_reg, pp);
1713 POSTING_READ(pp_ctrl_reg);
1714 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1715 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1716 /*
1717 * If the panel wasn't on, delay before accessing aux channel
1718 */
4be73780 1719 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1720 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1721 port_name(intel_dig_port->port));
f01eca2e 1722 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1723 }
adddaaf4
JN
1724
1725 return need_to_disable;
1726}
1727
951468f3
VS
1728/*
1729 * Must be paired with intel_edp_panel_vdd_off() or
1730 * intel_edp_panel_off().
1731 * Nested calls to these functions are not allowed since
1732 * we drop the lock. Caller must use some higher level
1733 * locking to prevent nested calls from other threads.
1734 */
b80d6c78 1735void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1736{
c695b6b6 1737 bool vdd;
adddaaf4 1738
c695b6b6
VS
1739 if (!is_edp(intel_dp))
1740 return;
1741
773538e8 1742 pps_lock(intel_dp);
c695b6b6 1743 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1744 pps_unlock(intel_dp);
c695b6b6 1745
e2c719b7 1746 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1747 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1748}
1749
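/*
 * Immediately clear the VDD force-on bit, assuming nobody still wants VDD,
 * and release the power domain reference taken when VDD was forced on.
 * Caller must hold pps_mutex.
 */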
4be73780 1750static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1751{
30add22d 1752 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1753 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1754 struct intel_digital_port *intel_dig_port =
1755 dp_to_dig_port(intel_dp);
1756 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1757 enum intel_display_power_domain power_domain;
5d613501 1758 u32 pp;
453c5420 1759 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1760
e39b999a 1761 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1762
15e899a0 1763 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1764
15e899a0 1765 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1766 return;
b0665d57 1767
3936fcf4
VS
1768 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1769 port_name(intel_dig_port->port));
bd943159 1770
be2c9196
VS
1771 pp = ironlake_get_pp_control(intel_dp);
1772 pp &= ~EDP_FORCE_VDD;
453c5420 1773
be2c9196
VS
1774 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1775 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1776
be2c9196
VS
1777 I915_WRITE(pp_ctrl_reg, pp);
1778 POSTING_READ(pp_ctrl_reg);
90791a5c 1779
be2c9196
VS
1780 /* Make sure sequencer is idle before allowing subsequent activity */
1781 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1782 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1783
be2c9196
VS
1784 if ((pp & POWER_TARGET_ON) == 0)
1785 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1786
be2c9196
VS
1787 power_domain = intel_display_port_power_domain(intel_encoder);
1788 intel_display_power_put(dev_priv, power_domain);
bd943159 1789}
5d613501 1790
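/* Delayed work: drop VDD if nobody has requested it again in the meantime. */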
4be73780 1791static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1792{
1793 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1794 struct intel_dp, panel_vdd_work);
bd943159 1795
773538e8 1796 pps_lock(intel_dp);
15e899a0
VS
1797 if (!intel_dp->want_panel_vdd)
1798 edp_panel_vdd_off_sync(intel_dp);
773538e8 1799 pps_unlock(intel_dp);
bd943159
KP
1800}
1801
aba86890
ID
1802static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1803{
1804 unsigned long delay;
1805
1806 /*
1807 * Queue the timer to fire a long time from now (relative to the power
1808 * down delay) to keep the panel power up across a sequence of
1809 * operations.
1810 */
1811 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1812 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1813}
1814
951468f3
VS
1815/*
1816 * Must be paired with edp_panel_vdd_on().
1817 * Must hold pps_mutex around the whole on/off sequence.
1818 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1819 */
4be73780 1820static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1821{
e39b999a
VS
1822 struct drm_i915_private *dev_priv =
1823 intel_dp_to_dev(intel_dp)->dev_private;
1824
1825 lockdep_assert_held(&dev_priv->pps_mutex);
1826
97af61f5
KP
1827 if (!is_edp(intel_dp))
1828 return;
5d613501 1829
e2c719b7 1830 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1831 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1832
bd943159
KP
1833 intel_dp->want_panel_vdd = false;
1834
aba86890 1835 if (sync)
4be73780 1836 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1837 else
1838 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1839}
1840
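/*
 * Turn the panel power on through the PP control register and wait until
 * the power sequencer reports the panel as on. Caller must hold pps_mutex.
 */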
9f0fb5be 1841static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1842{
30add22d 1843 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1844 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1845 u32 pp;
453c5420 1846 u32 pp_ctrl_reg;
9934c132 1847
9f0fb5be
VS
1848 lockdep_assert_held(&dev_priv->pps_mutex);
1849
97af61f5 1850 if (!is_edp(intel_dp))
bd943159 1851 return;
99ea7127 1852
3936fcf4
VS
1853 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1854 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1855
e7a89ace
VS
1856 if (WARN(edp_have_panel_power(intel_dp),
1857 "eDP port %c panel power already on\n",
1858 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1859 return;
9934c132 1860
4be73780 1861 wait_panel_power_cycle(intel_dp);
37c6c9b0 1862
bf13e81b 1863 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1864 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1865 if (IS_GEN5(dev)) {
1866 /* ILK workaround: disable reset around power sequence */
1867 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1868 I915_WRITE(pp_ctrl_reg, pp);
1869 POSTING_READ(pp_ctrl_reg);
05ce1a49 1870 }
37c6c9b0 1871
1c0ae80a 1872 pp |= POWER_TARGET_ON;
99ea7127
KP
1873 if (!IS_GEN5(dev))
1874 pp |= PANEL_POWER_RESET;
1875
453c5420
JB
1876 I915_WRITE(pp_ctrl_reg, pp);
1877 POSTING_READ(pp_ctrl_reg);
9934c132 1878
4be73780 1879 wait_panel_on(intel_dp);
dce56b3c 1880 intel_dp->last_power_on = jiffies;
9934c132 1881
05ce1a49
KP
1882 if (IS_GEN5(dev)) {
1883 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1884 I915_WRITE(pp_ctrl_reg, pp);
1885 POSTING_READ(pp_ctrl_reg);
05ce1a49 1886 }
9f0fb5be 1887}
e39b999a 1888
9f0fb5be
VS
1889void intel_edp_panel_on(struct intel_dp *intel_dp)
1890{
1891 if (!is_edp(intel_dp))
1892 return;
1893
1894 pps_lock(intel_dp);
1895 edp_panel_on(intel_dp);
773538e8 1896 pps_unlock(intel_dp);
9934c132
JB
1897}
1898
9f0fb5be
VS
1899
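/*
 * Turn the panel power off (forcing VDD off as well), wait until the power
 * sequencer reports the panel as off, and release the power domain
 * reference taken when VDD was enabled. Caller must hold pps_mutex.
 */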
1900static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1901{
4e6e1a54
ID
1902 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1903 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1904 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1905 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1906 enum intel_display_power_domain power_domain;
99ea7127 1907 u32 pp;
453c5420 1908 u32 pp_ctrl_reg;
9934c132 1909
9f0fb5be
VS
1910 lockdep_assert_held(&dev_priv->pps_mutex);
1911
97af61f5
KP
1912 if (!is_edp(intel_dp))
1913 return;
37c6c9b0 1914
3936fcf4
VS
1915 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1916 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1917
3936fcf4
VS
1918 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1919 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1920
453c5420 1921 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1922 /* We need to switch off panel power _and_ force vdd, for otherwise some
1923 * panels get very unhappy and cease to work. */
b3064154
PJ
1924 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1925 EDP_BLC_ENABLE);
453c5420 1926
bf13e81b 1927 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1928
849e39f5
PZ
1929 intel_dp->want_panel_vdd = false;
1930
453c5420
JB
1931 I915_WRITE(pp_ctrl_reg, pp);
1932 POSTING_READ(pp_ctrl_reg);
9934c132 1933
dce56b3c 1934 intel_dp->last_power_cycle = jiffies;
4be73780 1935 wait_panel_off(intel_dp);
849e39f5
PZ
1936
1937 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1938 power_domain = intel_display_port_power_domain(intel_encoder);
1939 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1940}
e39b999a 1941
9f0fb5be
VS
1942void intel_edp_panel_off(struct intel_dp *intel_dp)
1943{
1944 if (!is_edp(intel_dp))
1945 return;
e39b999a 1946
9f0fb5be
VS
1947 pps_lock(intel_dp);
1948 edp_panel_off(intel_dp);
773538e8 1949 pps_unlock(intel_dp);
9934c132
JB
1950}
1951
1250d107
JN
1952/* Enable backlight in the panel power control. */
1953static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1954{
da63a9f2
PZ
1955 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1956 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1958 u32 pp;
453c5420 1959 u32 pp_ctrl_reg;
32f9d658 1960
01cb9ea6
JB
1961 /*
1962 * If we enable the backlight right away following a panel power
1963 * on, we may see slight flicker as the panel syncs with the eDP
1964 * link. So delay a bit to make sure the image is solid before
1965 * allowing it to appear.
1966 */
4be73780 1967 wait_backlight_on(intel_dp);
e39b999a 1968
773538e8 1969 pps_lock(intel_dp);
e39b999a 1970
453c5420 1971 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1972 pp |= EDP_BLC_ENABLE;
453c5420 1973
bf13e81b 1974 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1975
1976 I915_WRITE(pp_ctrl_reg, pp);
1977 POSTING_READ(pp_ctrl_reg);
e39b999a 1978
773538e8 1979 pps_unlock(intel_dp);
32f9d658
ZW
1980}
1981
1250d107
JN
1982/* Enable backlight PWM and backlight PP control. */
1983void intel_edp_backlight_on(struct intel_dp *intel_dp)
1984{
1985 if (!is_edp(intel_dp))
1986 return;
1987
1988 DRM_DEBUG_KMS("\n");
1989
1990 intel_panel_enable_backlight(intel_dp->attached_connector);
1991 _intel_edp_backlight_on(intel_dp);
1992}
1993
1994/* Disable backlight in the panel power control. */
1995static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1996{
30add22d 1997 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1998 struct drm_i915_private *dev_priv = dev->dev_private;
1999 u32 pp;
453c5420 2000 u32 pp_ctrl_reg;
32f9d658 2001
f01eca2e
KP
2002 if (!is_edp(intel_dp))
2003 return;
2004
773538e8 2005 pps_lock(intel_dp);
e39b999a 2006
453c5420 2007 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2008 pp &= ~EDP_BLC_ENABLE;
453c5420 2009
bf13e81b 2010 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2011
2012 I915_WRITE(pp_ctrl_reg, pp);
2013 POSTING_READ(pp_ctrl_reg);
f7d2323c 2014
773538e8 2015 pps_unlock(intel_dp);
e39b999a
VS
2016
2017 intel_dp->last_backlight_off = jiffies;
f7d2323c 2018 edp_wait_backlight_off(intel_dp);
1250d107 2019}
f7d2323c 2020
1250d107
JN
2021/* Disable backlight PP control and backlight PWM. */
2022void intel_edp_backlight_off(struct intel_dp *intel_dp)
2023{
2024 if (!is_edp(intel_dp))
2025 return;
2026
2027 DRM_DEBUG_KMS("\n");
f7d2323c 2028
1250d107 2029 _intel_edp_backlight_off(intel_dp);
f7d2323c 2030 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2031}
a4fc5ed6 2032
73580fb7
JN
2033/*
2034 * Hook for controlling the panel power control backlight through the bl_power
2035 * sysfs attribute. Take care to handle multiple calls.
2036 */
2037static void intel_edp_backlight_power(struct intel_connector *connector,
2038 bool enable)
2039{
2040 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2041 bool is_enabled;
2042
773538e8 2043 pps_lock(intel_dp);
e39b999a 2044 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2045 pps_unlock(intel_dp);
73580fb7
JN
2046
2047 if (is_enabled == enable)
2048 return;
2049
23ba9373
JN
2050 DRM_DEBUG_KMS("panel power control backlight %s\n",
2051 enable ? "enable" : "disable");
73580fb7
JN
2052
2053 if (enable)
2054 _intel_edp_backlight_on(intel_dp);
2055 else
2056 _intel_edp_backlight_off(intel_dp);
2057}
2058
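/*
 * Enable the CPU eDP PLL via DP_A while the pipe is still disabled. The
 * cached intel_dp->DP value is updated so later writes keep the PLL bit set.
 */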
2bd2ad64 2059static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2060{
da63a9f2
PZ
2061 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2062 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2063 struct drm_device *dev = crtc->dev;
d240f20f
JB
2064 struct drm_i915_private *dev_priv = dev->dev_private;
2065 u32 dpa_ctl;
2066
2bd2ad64
DV
2067 assert_pipe_disabled(dev_priv,
2068 to_intel_crtc(crtc)->pipe);
2069
d240f20f
JB
2070 DRM_DEBUG_KMS("\n");
2071 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2072 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2073 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2074
2075 /* We don't adjust intel_dp->DP while tearing down the link, to
2076 * facilitate link retraining (e.g. after hotplug). Hence clear all
2077 * enable bits here to ensure that we don't enable too much. */
2078 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2079 intel_dp->DP |= DP_PLL_ENABLE;
2080 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2081 POSTING_READ(DP_A);
2082 udelay(200);
d240f20f
JB
2083}
2084
2bd2ad64 2085static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2086{
da63a9f2
PZ
2087 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2088 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2089 struct drm_device *dev = crtc->dev;
d240f20f
JB
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2091 u32 dpa_ctl;
2092
2bd2ad64
DV
2093 assert_pipe_disabled(dev_priv,
2094 to_intel_crtc(crtc)->pipe);
2095
d240f20f 2096 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2097 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2098 "dp pll off, should be on\n");
2099 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2100
2101 /* We can't rely on the value tracked for the DP register in
2102 * intel_dp->DP because link_down must not change that (otherwise link
2103 * re-training will fail). */
298b0b39 2104 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2105 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2106 POSTING_READ(DP_A);
d240f20f
JB
2107 udelay(200);
2108}
2109
c7ad3810 2110/* If the sink supports it, try to set the power state appropriately */
c19b0669 2111void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2112{
2113 int ret, i;
2114
2115 /* Should have a valid DPCD by this point */
2116 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2117 return;
2118
2119 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2120 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2121 DP_SET_POWER_D3);
c7ad3810
JB
2122 } else {
2123 /*
2124 * When turning on, we need to retry for 1ms to give the sink
2125 * time to wake up.
2126 */
2127 for (i = 0; i < 3; i++) {
9d1a1031
JN
2128 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2129 DP_SET_POWER_D0);
c7ad3810
JB
2130 if (ret == 1)
2131 break;
2132 msleep(1);
2133 }
2134 }
f9cac721
JN
2135
2136 if (ret != 1)
2137 DRM_DEBUG_KMS("failed to %s sink power state\n",
2138 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2139}
2140
19d8fe15
DV
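/*
 * Read back the hardware state of the DP port: returns true if the port is
 * enabled and reports which pipe it is driving, decoding the pipe select
 * bits differently for gen7 port A, CHV, CPT and the remaining platforms.
 */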
2141static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2142 enum pipe *pipe)
d240f20f 2143{
19d8fe15 2144 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2145 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2146 struct drm_device *dev = encoder->base.dev;
2147 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2148 enum intel_display_power_domain power_domain;
2149 u32 tmp;
2150
2151 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2152 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2153 return false;
2154
2155 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2156
2157 if (!(tmp & DP_PORT_EN))
2158 return false;
2159
bc7d38a4 2160 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2161 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2162 } else if (IS_CHERRYVIEW(dev)) {
2163 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2164 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2165 *pipe = PORT_TO_PIPE(tmp);
2166 } else {
2167 u32 trans_sel;
2168 u32 trans_dp;
2169 int i;
2170
2171 switch (intel_dp->output_reg) {
2172 case PCH_DP_B:
2173 trans_sel = TRANS_DP_PORT_SEL_B;
2174 break;
2175 case PCH_DP_C:
2176 trans_sel = TRANS_DP_PORT_SEL_C;
2177 break;
2178 case PCH_DP_D:
2179 trans_sel = TRANS_DP_PORT_SEL_D;
2180 break;
2181 default:
2182 return true;
2183 }
2184
055e393f 2185 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2186 trans_dp = I915_READ(TRANS_DP_CTL(i));
2187 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2188 *pipe = i;
2189 return true;
2190 }
2191 }
19d8fe15 2192
4a0833ec
DV
2193 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2194 intel_dp->output_reg);
2195 }
d240f20f 2196
19d8fe15
DV
2197 return true;
2198}
d240f20f 2199
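/*
 * Reconstruct the pipe config (sync polarity, audio, limited color range,
 * port clock and dotclock) from the DP port and transcoder registers.
 */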
045ac3b5 2200static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2201 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2202{
2203 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2204 u32 tmp, flags = 0;
63000ef6
XZ
2205 struct drm_device *dev = encoder->base.dev;
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2207 enum port port = dp_to_dig_port(intel_dp)->port;
2208 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2209 int dotclock;
045ac3b5 2210
9ed109a7
DV
2211 tmp = I915_READ(intel_dp->output_reg);
2212 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2213 pipe_config->has_audio = true;
2214
63000ef6 2215 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2216 if (tmp & DP_SYNC_HS_HIGH)
2217 flags |= DRM_MODE_FLAG_PHSYNC;
2218 else
2219 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2220
63000ef6
XZ
2221 if (tmp & DP_SYNC_VS_HIGH)
2222 flags |= DRM_MODE_FLAG_PVSYNC;
2223 else
2224 flags |= DRM_MODE_FLAG_NVSYNC;
2225 } else {
2226 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2227 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2228 flags |= DRM_MODE_FLAG_PHSYNC;
2229 else
2230 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2231
63000ef6
XZ
2232 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2233 flags |= DRM_MODE_FLAG_PVSYNC;
2234 else
2235 flags |= DRM_MODE_FLAG_NVSYNC;
2236 }
045ac3b5 2237
2d112de7 2238 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2239
8c875fca
VS
2240 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2241 tmp & DP_COLOR_RANGE_16_235)
2242 pipe_config->limited_color_range = true;
2243
eb14cb74
VS
2244 pipe_config->has_dp_encoder = true;
2245
2246 intel_dp_get_m_n(crtc, pipe_config);
2247
18442d08 2248 if (port == PORT_A) {
f1f644dc
JB
2249 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2250 pipe_config->port_clock = 162000;
2251 else
2252 pipe_config->port_clock = 270000;
2253 }
18442d08
VS
2254
2255 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2256 &pipe_config->dp_m_n);
2257
2258 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2259 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2260
2d112de7 2261 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2262
c6cd2ee2
JN
2263 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2264 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2265 /*
2266 * This is a big fat ugly hack.
2267 *
2268 * Some machines in UEFI boot mode provide us a VBT that has 18
2269 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2270 * unknown we fail to light up. Yet the same BIOS boots up with
2271 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2272 * max, not what it tells us to use.
2273 *
2274 * Note: This will still be broken if the eDP panel is not lit
2275 * up by the BIOS, and thus we can't get the mode at module
2276 * load.
2277 */
2278 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2279 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2280 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2281 }
045ac3b5
JB
2282}
2283
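/*
 * Disable sequence for the DP port: audio off, PSR off on non-DDI, then
 * backlight, sink (D3) and panel off with VDD held; on g4x the port itself
 * is turned off before the pipe.
 */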
e8cb4558 2284static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2285{
e8cb4558 2286 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2287 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2288 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2289
6e3c9717 2290 if (crtc->config->has_audio)
495a5bb8 2291 intel_audio_codec_disable(encoder);
6cb49835 2292
b32c6f48
RV
2293 if (HAS_PSR(dev) && !HAS_DDI(dev))
2294 intel_psr_disable(intel_dp);
2295
6cb49835
DV
2296 /* Make sure the panel is off before trying to change the mode. But also
2297 * ensure that we have vdd while we switch off the panel. */
24f3e092 2298 intel_edp_panel_vdd_on(intel_dp);
4be73780 2299 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2300 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2301 intel_edp_panel_off(intel_dp);
3739850b 2302
08aff3fe
VS
2303 /* disable the port before the pipe on g4x */
2304 if (INTEL_INFO(dev)->gen < 5)
3739850b 2305 intel_dp_link_down(intel_dp);
d240f20f
JB
2306}
2307
08aff3fe 2308static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2309{
2bd2ad64 2310 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2311 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2312
49277c31 2313 intel_dp_link_down(intel_dp);
08aff3fe
VS
2314 if (port == PORT_A)
2315 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2316}
2317
2318static void vlv_post_disable_dp(struct intel_encoder *encoder)
2319{
2320 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2321
2322 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2323}
2324
580d3811
VS
2325static void chv_post_disable_dp(struct intel_encoder *encoder)
2326{
2327 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2328 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2329 struct drm_device *dev = encoder->base.dev;
2330 struct drm_i915_private *dev_priv = dev->dev_private;
2331 struct intel_crtc *intel_crtc =
2332 to_intel_crtc(encoder->base.crtc);
2333 enum dpio_channel ch = vlv_dport_to_channel(dport);
2334 enum pipe pipe = intel_crtc->pipe;
2335 u32 val;
2336
2337 intel_dp_link_down(intel_dp);
2338
2339 mutex_lock(&dev_priv->dpio_lock);
2340
2341 /* Propagate soft reset to data lane reset */
97fd4d5c 2342 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2343 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2344 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2345
97fd4d5c
VS
2346 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2347 val |= CHV_PCS_REQ_SOFTRESET_EN;
2348 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2349
2350 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2351 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2352 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2353
2354 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2355 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2356 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2357
2358 mutex_unlock(&dev_priv->dpio_lock);
2359}
2360
7b13b58a
VS
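/*
 * Encode the requested link training pattern (and scrambling disable) into
 * either DP_TP_CTL, written directly on DDI platforms, or the port's DP
 * register value *DP, using the CPT or non-CPT bit layout as appropriate.
 */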
2361static void
2362_intel_dp_set_link_train(struct intel_dp *intel_dp,
2363 uint32_t *DP,
2364 uint8_t dp_train_pat)
2365{
2366 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2367 struct drm_device *dev = intel_dig_port->base.base.dev;
2368 struct drm_i915_private *dev_priv = dev->dev_private;
2369 enum port port = intel_dig_port->port;
2370
2371 if (HAS_DDI(dev)) {
2372 uint32_t temp = I915_READ(DP_TP_CTL(port));
2373
2374 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2375 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2376 else
2377 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2378
2379 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2380 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2381 case DP_TRAINING_PATTERN_DISABLE:
2382 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2383
2384 break;
2385 case DP_TRAINING_PATTERN_1:
2386 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2387 break;
2388 case DP_TRAINING_PATTERN_2:
2389 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2390 break;
2391 case DP_TRAINING_PATTERN_3:
2392 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2393 break;
2394 }
2395 I915_WRITE(DP_TP_CTL(port), temp);
2396
2397 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2398 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2399
2400 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2401 case DP_TRAINING_PATTERN_DISABLE:
2402 *DP |= DP_LINK_TRAIN_OFF_CPT;
2403 break;
2404 case DP_TRAINING_PATTERN_1:
2405 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2406 break;
2407 case DP_TRAINING_PATTERN_2:
2408 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2409 break;
2410 case DP_TRAINING_PATTERN_3:
2411 DRM_ERROR("DP training pattern 3 not supported\n");
2412 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2413 break;
2414 }
2415
2416 } else {
2417 if (IS_CHERRYVIEW(dev))
2418 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2419 else
2420 *DP &= ~DP_LINK_TRAIN_MASK;
2421
2422 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2423 case DP_TRAINING_PATTERN_DISABLE:
2424 *DP |= DP_LINK_TRAIN_OFF;
2425 break;
2426 case DP_TRAINING_PATTERN_1:
2427 *DP |= DP_LINK_TRAIN_PAT_1;
2428 break;
2429 case DP_TRAINING_PATTERN_2:
2430 *DP |= DP_LINK_TRAIN_PAT_2;
2431 break;
2432 case DP_TRAINING_PATTERN_3:
2433 if (IS_CHERRYVIEW(dev)) {
2434 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2435 } else {
2436 DRM_ERROR("DP training pattern 3 not supported\n");
2437 *DP |= DP_LINK_TRAIN_PAT_2;
2438 }
2439 break;
2440 }
2441 }
2442}
2443
2444static void intel_dp_enable_port(struct intel_dp *intel_dp)
2445{
2446 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2447 struct drm_i915_private *dev_priv = dev->dev_private;
2448
7b13b58a
VS
2449 /* enable with pattern 1 (as per spec) */
2450 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2451 DP_TRAINING_PATTERN_1);
2452
2453 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2454 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2455
2456 /*
2457 * Magic for VLV/CHV. We _must_ first set up the register
2458 * without actually enabling the port, and then do another
2459 * write to enable the port. Otherwise link training will
2460 * fail when the power sequencer is freshly used for this port.
2461 */
2462 intel_dp->DP |= DP_PORT_EN;
2463
2464 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2465 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2466}
2467
e8cb4558 2468static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2469{
e8cb4558
DV
2470 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2471 struct drm_device *dev = encoder->base.dev;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2473 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2474 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2475
0c33d8d7
DV
2476 if (WARN_ON(dp_reg & DP_PORT_EN))
2477 return;
5d613501 2478
093e3f13
VS
2479 pps_lock(intel_dp);
2480
2481 if (IS_VALLEYVIEW(dev))
2482 vlv_init_panel_power_sequencer(intel_dp);
2483
7b13b58a 2484 intel_dp_enable_port(intel_dp);
093e3f13
VS
2485
2486 edp_panel_vdd_on(intel_dp);
2487 edp_panel_on(intel_dp);
2488 edp_panel_vdd_off(intel_dp, true);
2489
2490 pps_unlock(intel_dp);
2491
61234fa5
VS
2492 if (IS_VALLEYVIEW(dev))
2493 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2494
f01eca2e 2495 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2496 intel_dp_start_link_train(intel_dp);
33a34e4e 2497 intel_dp_complete_link_train(intel_dp);
3ab9c637 2498 intel_dp_stop_link_train(intel_dp);
c1dec79a 2499
6e3c9717 2500 if (crtc->config->has_audio) {
c1dec79a
JN
2501 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2502 pipe_name(crtc->pipe));
2503 intel_audio_codec_enable(encoder);
2504 }
ab1f90f9 2505}
89b667f8 2506
ecff4f3b
JN
2507static void g4x_enable_dp(struct intel_encoder *encoder)
2508{
828f5c6e
JN
2509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2510
ecff4f3b 2511 intel_enable_dp(encoder);
4be73780 2512 intel_edp_backlight_on(intel_dp);
ab1f90f9 2513}
89b667f8 2514
ab1f90f9
JN
2515static void vlv_enable_dp(struct intel_encoder *encoder)
2516{
828f5c6e
JN
2517 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2518
4be73780 2519 intel_edp_backlight_on(intel_dp);
b32c6f48 2520 intel_psr_enable(intel_dp);
d240f20f
JB
2521}
2522
ecff4f3b 2523static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2524{
2525 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2526 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2527
8ac33ed3
DV
2528 intel_dp_prepare(encoder);
2529
d41f1efb
DV
2530 /* Only ilk+ has port A */
2531 if (dport->port == PORT_A) {
2532 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2533 ironlake_edp_pll_on(intel_dp);
d41f1efb 2534 }
ab1f90f9
JN
2535}
2536
83b84597
VS
2537static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2538{
2539 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2540 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2541 enum pipe pipe = intel_dp->pps_pipe;
2542 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2543
2544 edp_panel_vdd_off_sync(intel_dp);
2545
2546 /*
2547 * VLV seems to get confused when multiple power sequencers
2548 * have the same port selected (even if only one has power/vdd
2549 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2550 * CHV on the other hand doesn't seem to mind having the same port
2551 * selected in multiple power sequencers, but let's clear the
2552 * port select always when logically disconnecting a power sequencer
2553 * from a port.
2554 */
2555 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2556 pipe_name(pipe), port_name(intel_dig_port->port));
2557 I915_WRITE(pp_on_reg, 0);
2558 POSTING_READ(pp_on_reg);
2559
2560 intel_dp->pps_pipe = INVALID_PIPE;
2561}
2562
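/*
 * Detach any eDP encoder that currently owns the power sequencer for the
 * given pipe so the caller can claim it. Caller must hold pps_mutex.
 */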
a4a5d2f8
VS
2563static void vlv_steal_power_sequencer(struct drm_device *dev,
2564 enum pipe pipe)
2565{
2566 struct drm_i915_private *dev_priv = dev->dev_private;
2567 struct intel_encoder *encoder;
2568
2569 lockdep_assert_held(&dev_priv->pps_mutex);
2570
ac3c12e4
VS
2571 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2572 return;
2573
a4a5d2f8
VS
2574 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2575 base.head) {
2576 struct intel_dp *intel_dp;
773538e8 2577 enum port port;
a4a5d2f8
VS
2578
2579 if (encoder->type != INTEL_OUTPUT_EDP)
2580 continue;
2581
2582 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2583 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2584
2585 if (intel_dp->pps_pipe != pipe)
2586 continue;
2587
2588 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2589 pipe_name(pipe), port_name(port));
a4a5d2f8 2590
034e43c6
VS
2591 WARN(encoder->connectors_active,
2592 "stealing pipe %c power sequencer from active eDP port %c\n",
2593 pipe_name(pipe), port_name(port));
a4a5d2f8 2594
a4a5d2f8 2595 /* make sure vdd is off before we steal it */
83b84597 2596 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2597 }
2598}
2599
2600static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2601{
2602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2603 struct intel_encoder *encoder = &intel_dig_port->base;
2604 struct drm_device *dev = encoder->base.dev;
2605 struct drm_i915_private *dev_priv = dev->dev_private;
2606 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2607
2608 lockdep_assert_held(&dev_priv->pps_mutex);
2609
093e3f13
VS
2610 if (!is_edp(intel_dp))
2611 return;
2612
a4a5d2f8
VS
2613 if (intel_dp->pps_pipe == crtc->pipe)
2614 return;
2615
2616 /*
2617 * If another power sequencer was being used on this
2618 * port previously make sure to turn off vdd there while
2619 * we still have control of it.
2620 */
2621 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2622 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2623
2624 /*
2625 * We may be stealing the power
2626 * sequencer from another port.
2627 */
2628 vlv_steal_power_sequencer(dev, crtc->pipe);
2629
2630 /* now it's all ours */
2631 intel_dp->pps_pipe = crtc->pipe;
2632
2633 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2634 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2635
2636 /* init power sequencer on this pipe and port */
36b5f425
VS
2637 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2638 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2639}
2640
ab1f90f9 2641static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2642{
2bd2ad64 2643 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2644 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2645 struct drm_device *dev = encoder->base.dev;
89b667f8 2646 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2647 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2648 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2649 int pipe = intel_crtc->pipe;
2650 u32 val;
a4fc5ed6 2651
ab1f90f9 2652 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2653
ab3c759a 2654 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2655 val = 0;
2656 if (pipe)
2657 val |= (1<<21);
2658 else
2659 val &= ~(1<<21);
2660 val |= 0x001000c4;
ab3c759a
CML
2661 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2664
ab1f90f9
JN
2665 mutex_unlock(&dev_priv->dpio_lock);
2666
2667 intel_enable_dp(encoder);
89b667f8
JB
2668}
2669
ecff4f3b 2670static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2671{
2672 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2673 struct drm_device *dev = encoder->base.dev;
2674 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2675 struct intel_crtc *intel_crtc =
2676 to_intel_crtc(encoder->base.crtc);
e4607fcf 2677 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2678 int pipe = intel_crtc->pipe;
89b667f8 2679
8ac33ed3
DV
2680 intel_dp_prepare(encoder);
2681
89b667f8 2682 /* Program Tx lane resets to default */
0980a60f 2683 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2684 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2685 DPIO_PCS_TX_LANE2_RESET |
2686 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2687 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2688 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2689 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2690 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2691 DPIO_PCS_CLK_SOFT_RESET);
2692
2693 /* Fix up inter-pair skew failure */
ab3c759a
CML
2694 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2695 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2696 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2697 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2698}
2699
e4a1d846
CML
2700static void chv_pre_enable_dp(struct intel_encoder *encoder)
2701{
2702 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2703 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2704 struct drm_device *dev = encoder->base.dev;
2705 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2706 struct intel_crtc *intel_crtc =
2707 to_intel_crtc(encoder->base.crtc);
2708 enum dpio_channel ch = vlv_dport_to_channel(dport);
2709 int pipe = intel_crtc->pipe;
2710 int data, i;
949c1d43 2711 u32 val;
e4a1d846 2712
e4a1d846 2713 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2714
570e2a74
VS
2715 /* allow hardware to manage TX FIFO reset source */
2716 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2717 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2718 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2719
2720 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2721 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2722 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2723
949c1d43 2724 /* Deassert soft data lane reset*/
97fd4d5c 2725 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2726 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2727 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2728
2729 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2730 val |= CHV_PCS_REQ_SOFTRESET_EN;
2731 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2732
2733 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2734 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2735 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2736
97fd4d5c 2737 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2738 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2739 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2740
2741 /* Program Tx lane latency optimal setting*/
e4a1d846
CML
2742 for (i = 0; i < 4; i++) {
2743 /* Set the latency optimal bit */
2744 data = (i == 1) ? 0x0 : 0x6;
2745 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2746 data << DPIO_FRC_LATENCY_SHFIT);
2747
2748 /* Set the upar bit */
2749 data = (i == 1) ? 0x0 : 0x1;
2750 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2751 data << DPIO_UPAR_SHIFT);
2752 }
2753
2754 /* Data lane stagger programming */
2755 /* FIXME: Fix up value only after power analysis */
2756
2757 mutex_unlock(&dev_priv->dpio_lock);
2758
e4a1d846 2759 intel_enable_dp(encoder);
e4a1d846
CML
2760}
2761
9197c88b
VS
2762static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2763{
2764 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2765 struct drm_device *dev = encoder->base.dev;
2766 struct drm_i915_private *dev_priv = dev->dev_private;
2767 struct intel_crtc *intel_crtc =
2768 to_intel_crtc(encoder->base.crtc);
2769 enum dpio_channel ch = vlv_dport_to_channel(dport);
2770 enum pipe pipe = intel_crtc->pipe;
2771 u32 val;
2772
625695f8
VS
2773 intel_dp_prepare(encoder);
2774
9197c88b
VS
2775 mutex_lock(&dev_priv->dpio_lock);
2776
b9e5ac3c
VS
2777 /* program left/right clock distribution */
2778 if (pipe != PIPE_B) {
2779 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2780 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2781 if (ch == DPIO_CH0)
2782 val |= CHV_BUFLEFTENA1_FORCE;
2783 if (ch == DPIO_CH1)
2784 val |= CHV_BUFRIGHTENA1_FORCE;
2785 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2786 } else {
2787 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2788 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2789 if (ch == DPIO_CH0)
2790 val |= CHV_BUFLEFTENA2_FORCE;
2791 if (ch == DPIO_CH1)
2792 val |= CHV_BUFRIGHTENA2_FORCE;
2793 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2794 }
2795
9197c88b
VS
2796 /* program clock channel usage */
2797 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2798 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2799 if (pipe != PIPE_B)
2800 val &= ~CHV_PCS_USEDCLKCHANNEL;
2801 else
2802 val |= CHV_PCS_USEDCLKCHANNEL;
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2804
2805 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2806 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2807 if (pipe != PIPE_B)
2808 val &= ~CHV_PCS_USEDCLKCHANNEL;
2809 else
2810 val |= CHV_PCS_USEDCLKCHANNEL;
2811 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2812
2813 /*
2814 * This a a bit weird since generally CL
2815 * matches the pipe, but here we need to
2816 * pick the CL based on the port.
2817 */
2818 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2819 if (pipe != PIPE_B)
2820 val &= ~CHV_CMN_USEDCLKCHANNEL;
2821 else
2822 val |= CHV_CMN_USEDCLKCHANNEL;
2823 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2824
2825 mutex_unlock(&dev_priv->dpio_lock);
2826}
2827
a4fc5ed6 2828/*
df0c237d
JB
2829 * Native read with retry for link status and receiver capability reads for
2830 * cases where the sink may still be asleep.
9d1a1031
JN
2831 *
2832 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2833 * supposed to retry 3 times per the spec.
a4fc5ed6 2834 */
9d1a1031
JN
2835static ssize_t
2836intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2837 void *buffer, size_t size)
a4fc5ed6 2838{
9d1a1031
JN
2839 ssize_t ret;
2840 int i;
61da5fab 2841
f6a19066
VS
2842 /*
2843 * Sometimes we just get the same incorrect byte repeated
2844 * over the entire buffer. Doing just one throw-away read
2845 * initially seems to "solve" it.
2846 */
2847 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2848
61da5fab 2849 for (i = 0; i < 3; i++) {
9d1a1031
JN
2850 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2851 if (ret == size)
2852 return ret;
61da5fab
JB
2853 msleep(1);
2854 }
a4fc5ed6 2855
9d1a1031 2856 return ret;
a4fc5ed6
KP
2857}
2858
2859/*
2860 * Fetch AUX CH registers 0x202 - 0x207 which contain
2861 * link status information
2862 */
2863static bool
93f62dad 2864intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2865{
9d1a1031
JN
2866 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2867 DP_LANE0_1_STATUS,
2868 link_status,
2869 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2870}
2871
1100244e 2872/* These are source-specific values. */
a4fc5ed6 2873static uint8_t
1a2eb460 2874intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2875{
30add22d 2876 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2877 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2878 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2879
7ad14a29
SJ
2880 if (INTEL_INFO(dev)->gen >= 9) {
2881 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2882 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2883 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2884 } else if (IS_VALLEYVIEW(dev))
bd60018a 2885 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2886 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2887 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2888 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2889 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2890 else
bd60018a 2891 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2892}
2893
2894static uint8_t
2895intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2896{
30add22d 2897 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2898 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2899
5a9d1f1a
DL
2900 if (INTEL_INFO(dev)->gen >= 9) {
2901 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2902 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2903 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2905 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2906 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2907 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2908 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2909 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2910 default:
2911 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2912 }
2913 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2914 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2915 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2916 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2918 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2919 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2920 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2922 default:
bd60018a 2923 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2924 }
e2fa6fba
P
2925 } else if (IS_VALLEYVIEW(dev)) {
2926 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2928 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2930 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2932 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2934 default:
bd60018a 2935 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2936 }
bc7d38a4 2937 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2938 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2940 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2942 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2943 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2944 default:
bd60018a 2945 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2946 }
2947 } else {
2948 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2949 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2950 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2953 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2956 default:
bd60018a 2957 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2958 }
a4fc5ed6
KP
2959 }
2960}
2961
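/*
 * Translate the requested voltage swing / pre-emphasis of train_set[0] into
 * VLV DPIO PHY register values and program them via sideband writes.
 */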
e2fa6fba
P
2962static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2963{
2964 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2965 struct drm_i915_private *dev_priv = dev->dev_private;
2966 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2967 struct intel_crtc *intel_crtc =
2968 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2969 unsigned long demph_reg_value, preemph_reg_value,
2970 uniqtranscale_reg_value;
2971 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2972 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2973 int pipe = intel_crtc->pipe;
e2fa6fba
P
2974
2975 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2976 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2977 preemph_reg_value = 0x0004000;
2978 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2980 demph_reg_value = 0x2B405555;
2981 uniqtranscale_reg_value = 0x552AB83A;
2982 break;
bd60018a 2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2984 demph_reg_value = 0x2B404040;
2985 uniqtranscale_reg_value = 0x5548B83A;
2986 break;
bd60018a 2987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2988 demph_reg_value = 0x2B245555;
2989 uniqtranscale_reg_value = 0x5560B83A;
2990 break;
bd60018a 2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2992 demph_reg_value = 0x2B405555;
2993 uniqtranscale_reg_value = 0x5598DA3A;
2994 break;
2995 default:
2996 return 0;
2997 }
2998 break;
bd60018a 2999 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3000 preemph_reg_value = 0x0002000;
3001 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3003 demph_reg_value = 0x2B404040;
3004 uniqtranscale_reg_value = 0x5552B83A;
3005 break;
bd60018a 3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3007 demph_reg_value = 0x2B404848;
3008 uniqtranscale_reg_value = 0x5580B83A;
3009 break;
bd60018a 3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3011 demph_reg_value = 0x2B404040;
3012 uniqtranscale_reg_value = 0x55ADDA3A;
3013 break;
3014 default:
3015 return 0;
3016 }
3017 break;
bd60018a 3018 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3019 preemph_reg_value = 0x0000000;
3020 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3022 demph_reg_value = 0x2B305555;
3023 uniqtranscale_reg_value = 0x5570B83A;
3024 break;
bd60018a 3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3026 demph_reg_value = 0x2B2B4040;
3027 uniqtranscale_reg_value = 0x55ADDA3A;
3028 break;
3029 default:
3030 return 0;
3031 }
3032 break;
bd60018a 3033 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3034 preemph_reg_value = 0x0006000;
3035 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3037 demph_reg_value = 0x1B405555;
3038 uniqtranscale_reg_value = 0x55ADDA3A;
3039 break;
3040 default:
3041 return 0;
3042 }
3043 break;
3044 default:
3045 return 0;
3046 }
3047
0980a60f 3048 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3049 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3050 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3051 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3052 uniqtranscale_reg_value);
ab3c759a
CML
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3054 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3055 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3056 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3057 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3058
3059 return 0;
3060}
3061
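/*
 * CHV equivalent of the above: pick de-emphasis and margin values for the
 * requested swing/pre-emphasis level, program the per-lane DPIO registers
 * and kick off the swing calculation.
 */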
e4a1d846
CML
3062static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3063{
3064 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3065 struct drm_i915_private *dev_priv = dev->dev_private;
3066 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3067 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3068 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3069 uint8_t train_set = intel_dp->train_set[0];
3070 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3071 enum pipe pipe = intel_crtc->pipe;
3072 int i;
e4a1d846
CML
3073
3074 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3075 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3076 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3077 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3078 deemph_reg_value = 128;
3079 margin_reg_value = 52;
3080 break;
bd60018a 3081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3082 deemph_reg_value = 128;
3083 margin_reg_value = 77;
3084 break;
bd60018a 3085 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3086 deemph_reg_value = 128;
3087 margin_reg_value = 102;
3088 break;
bd60018a 3089 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3090 deemph_reg_value = 128;
3091 margin_reg_value = 154;
3092 /* FIXME extra to set for 1200 */
3093 break;
3094 default:
3095 return 0;
3096 }
3097 break;
bd60018a 3098 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3099 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3101 deemph_reg_value = 85;
3102 margin_reg_value = 78;
3103 break;
bd60018a 3104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3105 deemph_reg_value = 85;
3106 margin_reg_value = 116;
3107 break;
bd60018a 3108 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3109 deemph_reg_value = 85;
3110 margin_reg_value = 154;
3111 break;
3112 default:
3113 return 0;
3114 }
3115 break;
bd60018a 3116 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3117 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3119 deemph_reg_value = 64;
3120 margin_reg_value = 104;
3121 break;
bd60018a 3122 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3123 deemph_reg_value = 64;
3124 margin_reg_value = 154;
3125 break;
3126 default:
3127 return 0;
3128 }
3129 break;
bd60018a 3130 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3131 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3133 deemph_reg_value = 43;
3134 margin_reg_value = 154;
3135 break;
3136 default:
3137 return 0;
3138 }
3139 break;
3140 default:
3141 return 0;
3142 }
3143
3144 mutex_lock(&dev_priv->dpio_lock);
3145
3146 /* Clear calc init */
1966e59e
VS
3147 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3148 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3149 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3150 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3151 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3152
3153 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3154 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3155 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3156 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3157 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3158
a02ef3c7
VS
3159 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3160 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3161 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3162 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3163
3164 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3165 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3166 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3167 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3168
e4a1d846 3169 /* Program swing deemph */
f72df8db
VS
3170 for (i = 0; i < 4; i++) {
3171 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3172 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3173 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3174 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3175 }
e4a1d846
CML
3176
3177 /* Program swing margin */
f72df8db
VS
3178 for (i = 0; i < 4; i++) {
3179 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3180 val &= ~DPIO_SWING_MARGIN000_MASK;
3181 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3182 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3183 }
e4a1d846
CML
3184
3185 /* Disable unique transition scale */
f72df8db
VS
3186 for (i = 0; i < 4; i++) {
3187 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3188 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3189 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3190 }
e4a1d846
CML
3191
3192 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3193 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3194 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3195 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3196
3197 /*
3198 * The document said it needs to set bit 27 for ch0 and bit 26
3199 * for ch1. Might be a typo in the doc.
3200 * For now, for this unique transition scale selection, set bit
3201 * 27 for ch0 and ch1.
3202 */
f72df8db
VS
3203 for (i = 0; i < 4; i++) {
3204 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3205 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3206 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3207 }
e4a1d846 3208
f72df8db
VS
3209 for (i = 0; i < 4; i++) {
3210 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3211 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3212 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3213 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3214 }
e4a1d846
CML
3215 }
3216
3217 /* Start swing calculation */
1966e59e
VS
3218 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3219 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3220 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3221
3222 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3223 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3224 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3225
3226 /* LRC Bypass */
3227 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3228 val |= DPIO_LRC_BYPASS;
3229 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3230
3231 mutex_unlock(&dev_priv->dpio_lock);
3232
3233 return 0;
3234}
3235
a4fc5ed6 3236static void
0301b3ac
JN
3237intel_get_adjust_train(struct intel_dp *intel_dp,
3238 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3239{
3240 uint8_t v = 0;
3241 uint8_t p = 0;
3242 int lane;
1a2eb460
KP
3243 uint8_t voltage_max;
3244 uint8_t preemph_max;
a4fc5ed6 3245
33a34e4e 3246 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3247 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3248 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3249
3250 if (this_v > v)
3251 v = this_v;
3252 if (this_p > p)
3253 p = this_p;
3254 }
3255
1a2eb460 3256 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3257 if (v >= voltage_max)
3258 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3259
1a2eb460
KP
3260 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3261 if (p >= preemph_max)
3262 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3263
3264 for (lane = 0; lane < 4; lane++)
33a34e4e 3265 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3266}
3267
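
The helper above collapses the per-lane adjust requests into a single value: it takes the highest voltage swing and pre-emphasis any lane asked for, clamps each at the source's limit while setting the corresponding MAX_*_REACHED flag, and then programs the same byte on every lane. A minimal standalone sketch of that collapse; the DP_TRAIN_* flag values are the ones from drm_dp_helper.h, while the clamp limits are arbitrary stand-ins for intel_dp_voltage_max()/intel_dp_pre_emphasis_max():

#include <stdint.h>
#include <stdio.h>

#define DP_TRAIN_MAX_SWING_REACHED        (1 << 2)
#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)

int main(void)
{
	/* per-lane requests, already shifted into train_set bit positions */
	uint8_t req_v[4] = { 0x1, 0x2, 0x1, 0x0 };	/* voltage swing */
	uint8_t req_p[4] = { 0x0, 0x1 << 3, 0x0, 0x0 };	/* pre-emphasis */
	uint8_t v = 0, p = 0, train_set;
	int lane;

	for (lane = 0; lane < 4; lane++) {
		if (req_v[lane] > v)
			v = req_v[lane];
		if (req_p[lane] > p)
			p = req_p[lane];
	}

	/* assumed source limits: swing level 2, pre-emphasis level 1 */
	if (v >= 0x2)
		v = 0x2 | DP_TRAIN_MAX_SWING_REACHED;
	if (p >= (0x1 << 3))
		p = (0x1 << 3) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	train_set = v | p;	/* the same value is written to all four lanes */
	printf("train_set = 0x%02x\n", train_set);
	return 0;
}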
3268static uint32_t
f0a3424e 3269intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3270{
3cf2efb1 3271 uint32_t signal_levels = 0;
a4fc5ed6 3272
3cf2efb1 3273 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3275 default:
3276 signal_levels |= DP_VOLTAGE_0_4;
3277 break;
bd60018a 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3279 signal_levels |= DP_VOLTAGE_0_6;
3280 break;
bd60018a 3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3282 signal_levels |= DP_VOLTAGE_0_8;
3283 break;
bd60018a 3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3285 signal_levels |= DP_VOLTAGE_1_2;
3286 break;
3287 }
3cf2efb1 3288 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3289 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3290 default:
3291 signal_levels |= DP_PRE_EMPHASIS_0;
3292 break;
bd60018a 3293 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3294 signal_levels |= DP_PRE_EMPHASIS_3_5;
3295 break;
bd60018a 3296 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3297 signal_levels |= DP_PRE_EMPHASIS_6;
3298 break;
bd60018a 3299 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3300 signal_levels |= DP_PRE_EMPHASIS_9_5;
3301 break;
3302 }
3303 return signal_levels;
3304}
3305
e3421a18
ZW
3306/* Gen6's DP voltage swing and pre-emphasis control */
3307static uint32_t
3308intel_gen6_edp_signal_levels(uint8_t train_set)
3309{
3c5a62b5
YL
3310 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3311 DP_TRAIN_PRE_EMPHASIS_MASK);
3312 switch (signal_levels) {
bd60018a
SJ
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3315 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3317 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3320 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3323 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3326 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3327 default:
3c5a62b5
YL
3328 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3329 "0x%x\n", signal_levels);
3330 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3331 }
3332}
3333
1a2eb460
KP
3334/* Gen7's DP voltage swing and pre-emphasis control */
3335static uint32_t
3336intel_gen7_edp_signal_levels(uint8_t train_set)
3337{
3338 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3339 DP_TRAIN_PRE_EMPHASIS_MASK);
3340 switch (signal_levels) {
bd60018a 3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3342 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3344 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3346 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3347
bd60018a 3348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3349 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3351 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3352
bd60018a 3353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3354 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3355 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3356 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3357
3358 default:
3359 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3360 "0x%x\n", signal_levels);
3361 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3362 }
3363}
3364
d6c0d722
PZ
3365/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3366static uint32_t
f0a3424e 3367intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3368{
d6c0d722
PZ
3369 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3370 DP_TRAIN_PRE_EMPHASIS_MASK);
3371 switch (signal_levels) {
bd60018a 3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3373 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3375 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3377 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3379 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3380
bd60018a 3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3382 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3384 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3386 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3387
bd60018a 3388 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3389 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3391 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3392
3393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3394 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3395 default:
3396 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3397 "0x%x\n", signal_levels);
c5fe6a06 3398 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3399 }
a4fc5ed6
KP
3400}
3401
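
The switch above picks one of ten DDI buffer translation entries from the (voltage swing, pre-emphasis) pair. Purely as an illustration of the shape of that mapping, the same relationship written as a lookup table; the indices are copied from the cases above and -1 marks combinations the default: case rejects:

#include <stdio.h>

/* hsw_buf_trans[swing level][pre-emphasis level], -1 = unsupported combination */
static const int hsw_buf_trans[4][4] = {
	{ 0,  1,  2,  3 },
	{ 4,  5,  6, -1 },
	{ 7,  8, -1, -1 },
	{ 9, -1, -1, -1 },
};

int main(void)
{
	int swing = 2, preemph = 1;	/* example request */
	int sel = hsw_buf_trans[swing][preemph];

	if (sel < 0)
		sel = 0;	/* same fallback as the default: case above */
	printf("DDI_BUF_TRANS_SELECT(%d)\n", sel);
	return 0;
}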
f0a3424e
PZ
3402/* Properly updates "DP" with the correct signal levels. */
3403static void
3404intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3405{
3406 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3407 enum port port = intel_dig_port->port;
f0a3424e
PZ
3408 struct drm_device *dev = intel_dig_port->base.base.dev;
3409 uint32_t signal_levels, mask;
3410 uint8_t train_set = intel_dp->train_set[0];
3411
5a9d1f1a 3412 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3413 signal_levels = intel_hsw_signal_levels(train_set);
3414 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3415 } else if (IS_CHERRYVIEW(dev)) {
3416 signal_levels = intel_chv_signal_levels(intel_dp);
3417 mask = 0;
e2fa6fba
P
3418 } else if (IS_VALLEYVIEW(dev)) {
3419 signal_levels = intel_vlv_signal_levels(intel_dp);
3420 mask = 0;
bc7d38a4 3421 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3422 signal_levels = intel_gen7_edp_signal_levels(train_set);
3423 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3424 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3425 signal_levels = intel_gen6_edp_signal_levels(train_set);
3426 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3427 } else {
3428 signal_levels = intel_gen4_signal_levels(train_set);
3429 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3430 }
3431
3432 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3433
3434 *DP = (*DP & ~mask) | signal_levels;
3435}
3436
a4fc5ed6 3437static bool
ea5b213a 3438intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3439 uint32_t *DP,
58e10eb9 3440 uint8_t dp_train_pat)
a4fc5ed6 3441{
174edf1f
PZ
3442 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3443 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3444 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3445 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3446 int ret, len;
a4fc5ed6 3447
7b13b58a 3448 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3449
70aff66c 3450 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3451 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3452
2cdfe6c8
JN
3453 buf[0] = dp_train_pat;
3454 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3455 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3456 /* don't write DP_TRAINING_LANEx_SET on disable */
3457 len = 1;
3458 } else {
3459 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3460 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3461 len = intel_dp->lane_count + 1;
47ea7542 3462 }
a4fc5ed6 3463
9d1a1031
JN
3464 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3465 buf, len);
2cdfe6c8
JN
3466
3467 return ret == len;
a4fc5ed6
KP
3468}
3469
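
The single drm_dp_dpcd_write() above works because DP_TRAINING_LANE0_SET..LANE3_SET (DPCD 0x103-0x106) sit directly after DP_TRAINING_PATTERN_SET (0x102), so the pattern byte and the per-lane drive settings go out in one AUX burst. A small sketch of how that buffer is laid out; the addresses come from the DP spec, the fill values are placeholders, and 0x21 is used here as an example of training pattern 1 with scrambling disabled:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DP_TRAINING_PATTERN_SET 0x102	/* lane settings follow at 0x103..0x106 */

int main(void)
{
	uint8_t train_set[4] = { 0x0a, 0x0a, 0x0a, 0x0a };	/* placeholder values */
	int lane_count = 4;
	uint8_t buf[1 + 4];
	int len, i;

	buf[0] = 0x21;				/* pattern byte */
	memcpy(buf + 1, train_set, lane_count);
	len = lane_count + 1;

	for (i = 0; i < len; i++)
		printf("DPCD 0x%03x <- 0x%02x\n", DP_TRAINING_PATTERN_SET + i, buf[i]);
	return 0;
}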
70aff66c
JN
3470static bool
3471intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3472 uint8_t dp_train_pat)
3473{
953d22e8 3474 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3475 intel_dp_set_signal_levels(intel_dp, DP);
3476 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3477}
3478
3479static bool
3480intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3481 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3482{
3483 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3484 struct drm_device *dev = intel_dig_port->base.base.dev;
3485 struct drm_i915_private *dev_priv = dev->dev_private;
3486 int ret;
3487
3488 intel_get_adjust_train(intel_dp, link_status);
3489 intel_dp_set_signal_levels(intel_dp, DP);
3490
3491 I915_WRITE(intel_dp->output_reg, *DP);
3492 POSTING_READ(intel_dp->output_reg);
3493
9d1a1031
JN
3494 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3495 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3496
3497 return ret == intel_dp->lane_count;
3498}
3499
3ab9c637
ID
3500static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3501{
3502 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3503 struct drm_device *dev = intel_dig_port->base.base.dev;
3504 struct drm_i915_private *dev_priv = dev->dev_private;
3505 enum port port = intel_dig_port->port;
3506 uint32_t val;
3507
3508 if (!HAS_DDI(dev))
3509 return;
3510
3511 val = I915_READ(DP_TP_CTL(port));
3512 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3513 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3514 I915_WRITE(DP_TP_CTL(port), val);
3515
3516 /*
3517 * On PORT_A we can have only eDP in SST mode. There the only reason
3518 * we need to set idle transmission mode is to work around a HW issue
3519 * where we enable the pipe while not in idle link-training mode.
3520 * In this case there is a requirement to wait for a minimum number of
3521 * idle patterns to be sent.
3522 */
3523 if (port == PORT_A)
3524 return;
3525
3526 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3527 1))
3528 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3529}
3530
33a34e4e 3531/* Enable corresponding port and start training pattern 1 */
c19b0669 3532void
33a34e4e 3533intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3534{
da63a9f2 3535 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3536 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3537 int i;
3538 uint8_t voltage;
cdb0e95b 3539 int voltage_tries, loop_tries;
ea5b213a 3540 uint32_t DP = intel_dp->DP;
6aba5b6c 3541 uint8_t link_config[2];
a4fc5ed6 3542
affa9354 3543 if (HAS_DDI(dev))
c19b0669
PZ
3544 intel_ddi_prepare_link_retrain(encoder);
3545
3cf2efb1 3546 /* Write the link configuration data */
6aba5b6c
JN
3547 link_config[0] = intel_dp->link_bw;
3548 link_config[1] = intel_dp->lane_count;
3549 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3550 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3551 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3552 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3553 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3554 &intel_dp->rate_select, 1);
6aba5b6c
JN
3555
3556 link_config[0] = 0;
3557 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3558 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3559
3560 DP |= DP_PORT_EN;
1a2eb460 3561
70aff66c
JN
3562 /* clock recovery */
3563 if (!intel_dp_reset_link_train(intel_dp, &DP,
3564 DP_TRAINING_PATTERN_1 |
3565 DP_LINK_SCRAMBLING_DISABLE)) {
3566 DRM_ERROR("failed to enable link training\n");
3567 return;
3568 }
3569
a4fc5ed6 3570 voltage = 0xff;
cdb0e95b
KP
3571 voltage_tries = 0;
3572 loop_tries = 0;
a4fc5ed6 3573 for (;;) {
70aff66c 3574 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3575
a7c9655f 3576 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3577 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3578 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3579 break;
93f62dad 3580 }
a4fc5ed6 3581
01916270 3582 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3583 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3584 break;
3585 }
3586
3587 /* Check to see if we've tried the max voltage */
3588 for (i = 0; i < intel_dp->lane_count; i++)
3589 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3590 break;
3b4f819d 3591 if (i == intel_dp->lane_count) {
b06fbda3
DV
3592 ++loop_tries;
3593 if (loop_tries == 5) {
3def84b3 3594 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3595 break;
3596 }
70aff66c
JN
3597 intel_dp_reset_link_train(intel_dp, &DP,
3598 DP_TRAINING_PATTERN_1 |
3599 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3600 voltage_tries = 0;
3601 continue;
3602 }
a4fc5ed6 3603
3cf2efb1 3604 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3605 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3606 ++voltage_tries;
b06fbda3 3607 if (voltage_tries == 5) {
3def84b3 3608 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3609 break;
3610 }
3611 } else
3612 voltage_tries = 0;
3613 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3614
70aff66c
JN
3615 /* Update training set as requested by target */
3616 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3617 DRM_ERROR("failed to update link training\n");
3618 break;
3619 }
a4fc5ed6
KP
3620 }
3621
33a34e4e
JB
3622 intel_dp->DP = DP;
3623}
3624
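
The clock-recovery loop above gives up along two different paths: five attempts at the same voltage swing without success, or five full restarts once every lane reports DP_TRAIN_MAX_SWING_REACHED. A stripped-down sketch of just that retry bookkeeping, with the real training replaced by a stub sink that keeps requesting a higher swing, so the sketch ends through the full-restart counter:

#include <stdbool.h>
#include <stdio.h>

/* stub: the link never trains in this sketch */
static bool clock_recovery_ok(void) { return false; }

int main(void)
{
	int voltage = 0, prev_voltage = -1;
	int voltage_tries = 0, loop_tries = 0;

	for (;;) {
		if (clock_recovery_ok()) {
			printf("clock recovery OK\n");
			break;
		}

		/* all lanes at max swing: restart from scratch, up to 5 times */
		if (voltage >= 3) {
			if (++loop_tries == 5) {
				printf("too many full retries, give up\n");
				break;
			}
			voltage = 0;
			prev_voltage = -1;
			voltage_tries = 0;
			continue;
		}

		/* same swing requested again: count it, give up after 5 */
		if (voltage == prev_voltage) {
			if (++voltage_tries == 5) {
				printf("too many voltage retries, give up\n");
				break;
			}
		} else {
			voltage_tries = 0;
		}

		prev_voltage = voltage;
		voltage++;	/* stub for "apply the sink's new request" */
	}
	return 0;
}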
c19b0669 3625void
33a34e4e
JB
3626intel_dp_complete_link_train(struct intel_dp *intel_dp)
3627{
33a34e4e 3628 bool channel_eq = false;
37f80975 3629 int tries, cr_tries;
33a34e4e 3630 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3631 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3632
3633 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3634 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3635 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3636
a4fc5ed6 3637 /* channel equalization */
70aff66c 3638 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3639 training_pattern |
70aff66c
JN
3640 DP_LINK_SCRAMBLING_DISABLE)) {
3641 DRM_ERROR("failed to start channel equalization\n");
3642 return;
3643 }
3644
a4fc5ed6 3645 tries = 0;
37f80975 3646 cr_tries = 0;
a4fc5ed6
KP
3647 channel_eq = false;
3648 for (;;) {
70aff66c 3649 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3650
37f80975
JB
3651 if (cr_tries > 5) {
3652 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3653 break;
3654 }
3655
a7c9655f 3656 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3657 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3658 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3659 break;
70aff66c 3660 }
a4fc5ed6 3661
37f80975 3662 /* Make sure clock is still ok */
01916270 3663 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3664 intel_dp_start_link_train(intel_dp);
70aff66c 3665 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3666 training_pattern |
70aff66c 3667 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3668 cr_tries++;
3669 continue;
3670 }
3671
1ffdff13 3672 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3673 channel_eq = true;
3674 break;
3675 }
a4fc5ed6 3676
37f80975
JB
3677 /* Try 5 times, then try clock recovery if that fails */
3678 if (tries > 5) {
37f80975 3679 intel_dp_start_link_train(intel_dp);
70aff66c 3680 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3681 training_pattern |
70aff66c 3682 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3683 tries = 0;
3684 cr_tries++;
3685 continue;
3686 }
a4fc5ed6 3687
70aff66c
JN
3688 /* Update training set as requested by target */
3689 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3690 DRM_ERROR("failed to update link training\n");
3691 break;
3692 }
3cf2efb1 3693 ++tries;
869184a6 3694 }
3cf2efb1 3695
3ab9c637
ID
3696 intel_dp_set_idle_link_train(intel_dp);
3697
3698 intel_dp->DP = DP;
3699
d6c0d722 3700 if (channel_eq)
07f42258 3701 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3702
3ab9c637
ID
3703}
3704
3705void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3706{
70aff66c 3707 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3708 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3709}
3710
3711static void
ea5b213a 3712intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3713{
da63a9f2 3714 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3715 enum port port = intel_dig_port->port;
da63a9f2 3716 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3717 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3718 uint32_t DP = intel_dp->DP;
a4fc5ed6 3719
bc76e320 3720 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3721 return;
3722
0c33d8d7 3723 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3724 return;
3725
28c97730 3726 DRM_DEBUG_KMS("\n");
32f9d658 3727
bc7d38a4 3728 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3729 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3730 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3731 } else {
aad3d14d
VS
3732 if (IS_CHERRYVIEW(dev))
3733 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3734 else
3735 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3736 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3737 }
fe255d00 3738 POSTING_READ(intel_dp->output_reg);
5eb08b69 3739
493a7081 3740 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3741 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3742 /* Hardware workaround: leaving our transcoder select
3743 * set to transcoder B while it's off will prevent the
3744 * corresponding HDMI output on transcoder A.
3745 *
3746 * Combine this with another hardware workaround:
3747 * transcoder select bit can only be cleared while the
3748 * port is enabled.
3749 */
3750 DP &= ~DP_PIPEB_SELECT;
3751 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3752 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3753 }
3754
832afda6 3755 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3756 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3757 POSTING_READ(intel_dp->output_reg);
f01eca2e 3758 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3759}
3760
26d61aad
KP
3761static bool
3762intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3763{
a031d709
RV
3764 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3765 struct drm_device *dev = dig_port->base.base.dev;
3766 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3767 uint8_t rev;
a031d709 3768
9d1a1031
JN
3769 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3770 sizeof(intel_dp->dpcd)) < 0)
edb39244 3771 return false; /* aux transfer failed */
92fd8fd1 3772
a8e98153 3773 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3774
edb39244
AJ
3775 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3776 return false; /* DPCD not present */
3777
2293bb5c
SK
3778 /* Check if the panel supports PSR */
3779 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3780 if (is_edp(intel_dp)) {
9d1a1031
JN
3781 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3782 intel_dp->psr_dpcd,
3783 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3784 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3785 dev_priv->psr.sink_support = true;
50003939 3786 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3787 }
50003939
JN
3788 }
3789
7809a611 3790 /* Training Pattern 3 support, both source and sink */
06ea66b6 3791 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3792 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3793 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3794 intel_dp->use_tps3 = true;
f8d8a672 3795 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3796 } else
3797 intel_dp->use_tps3 = false;
3798
fc0f8e25
SJ
3799 /* Intermediate frequency support */
3800 if (is_edp(intel_dp) &&
3801 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3802 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3803 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3804 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3805 int i;
3806
fc0f8e25
SJ
3807 intel_dp_dpcd_read_wake(&intel_dp->aux,
3808 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3809 sink_rates,
3810 sizeof(sink_rates));
ea2d8a42 3811
94ca719e
VS
3812 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3813 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3814
3815 if (val == 0)
3816 break;
3817
94ca719e 3818 intel_dp->sink_rates[i] = val * 200;
ea2d8a42 3819 }
94ca719e 3820 intel_dp->num_sink_rates = i;
fc0f8e25 3821 }
0336400e
VS
3822
3823 intel_dp_print_rates(intel_dp);
3824
edb39244
AJ
3825 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3826 DP_DWN_STRM_PORT_PRESENT))
3827 return true; /* native DP sink */
3828
3829 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3830 return true; /* no per-port downstream info */
3831
9d1a1031
JN
3832 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3833 intel_dp->downstream_ports,
3834 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3835 return false; /* downstream port status fetch failed */
3836
3837 return true;
92fd8fd1
KP
3838}
3839
0d198328
AJ
3840static void
3841intel_dp_probe_oui(struct intel_dp *intel_dp)
3842{
3843 u8 buf[3];
3844
3845 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3846 return;
3847
9d1a1031 3848 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3849 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3850 buf[0], buf[1], buf[2]);
3851
9d1a1031 3852 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3853 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3854 buf[0], buf[1], buf[2]);
3855}
3856
0e32b39c
DA
3857static bool
3858intel_dp_probe_mst(struct intel_dp *intel_dp)
3859{
3860 u8 buf[1];
3861
3862 if (!intel_dp->can_mst)
3863 return false;
3864
3865 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3866 return false;
3867
0e32b39c
DA
3868 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3869 if (buf[0] & DP_MST_CAP) {
3870 DRM_DEBUG_KMS("Sink is MST capable\n");
3871 intel_dp->is_mst = true;
3872 } else {
3873 DRM_DEBUG_KMS("Sink is not MST capable\n");
3874 intel_dp->is_mst = false;
3875 }
3876 }
0e32b39c
DA
3877
3878 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3879 return intel_dp->is_mst;
3880}
3881
d2e216d0
RV
3882int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3883{
3884 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3885 struct drm_device *dev = intel_dig_port->base.base.dev;
3886 struct intel_crtc *intel_crtc =
3887 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3888 u8 buf;
3889 int test_crc_count;
3890 int attempts = 6;
d2e216d0 3891
ad9dc91b 3892 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3893 return -EIO;
d2e216d0 3894
ad9dc91b 3895 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3896 return -ENOTTY;
3897
1dda5f93
RV
3898 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3899 return -EIO;
3900
9d1a1031 3901 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3902 buf | DP_TEST_SINK_START) < 0)
bda0381e 3903 return -EIO;
d2e216d0 3904
1dda5f93 3905 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3906 return -EIO;
ad9dc91b 3907 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3908
ad9dc91b 3909 do {
1dda5f93
RV
3910 if (drm_dp_dpcd_readb(&intel_dp->aux,
3911 DP_TEST_SINK_MISC, &buf) < 0)
3912 return -EIO;
ad9dc91b
RV
3913 intel_wait_for_vblank(dev, intel_crtc->pipe);
3914 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3915
3916 if (attempts == 0) {
90bd1f46
DV
3917 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3918 return -ETIMEDOUT;
ad9dc91b 3919 }
d2e216d0 3920
9d1a1031 3921 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3922 return -EIO;
d2e216d0 3923
1dda5f93
RV
3924 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3925 return -EIO;
3926 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3927 buf & ~DP_TEST_SINK_START) < 0)
3928 return -EIO;
ce31d9f4 3929
d2e216d0
RV
3930 return 0;
3931}
3932
a60f0e38
JB
3933static bool
3934intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3935{
9d1a1031
JN
3936 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3937 DP_DEVICE_SERVICE_IRQ_VECTOR,
3938 sink_irq_vector, 1) == 1;
a60f0e38
JB
3939}
3940
0e32b39c
DA
3941static bool
3942intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3943{
3944 int ret;
3945
3946 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3947 DP_SINK_COUNT_ESI,
3948 sink_irq_vector, 14);
3949 if (ret != 14)
3950 return false;
3951
3952 return true;
3953}
3954
a60f0e38
JB
3955static void
3956intel_dp_handle_test_request(struct intel_dp *intel_dp)
3957{
3958 /* NAK by default */
9d1a1031 3959 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3960}
3961
0e32b39c
DA
3962static int
3963intel_dp_check_mst_status(struct intel_dp *intel_dp)
3964{
3965 bool bret;
3966
3967 if (intel_dp->is_mst) {
3968 u8 esi[16] = { 0 };
3969 int ret = 0;
3970 int retry;
3971 bool handled;
3972 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3973go_again:
3974 if (bret == true) {
3975
3976 /* check link status - esi[10] = 0x200c */
3977 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3978 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3979 intel_dp_start_link_train(intel_dp);
3980 intel_dp_complete_link_train(intel_dp);
3981 intel_dp_stop_link_train(intel_dp);
3982 }
3983
6f34cc39 3984 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3985 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3986
3987 if (handled) {
3988 for (retry = 0; retry < 3; retry++) {
3989 int wret;
3990 wret = drm_dp_dpcd_write(&intel_dp->aux,
3991 DP_SINK_COUNT_ESI+1,
3992 &esi[1], 3);
3993 if (wret == 3) {
3994 break;
3995 }
3996 }
3997
3998 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3999 if (bret == true) {
6f34cc39 4000 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4001 goto go_again;
4002 }
4003 } else
4004 ret = 0;
4005
4006 return ret;
4007 } else {
4008 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4009 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4010 intel_dp->is_mst = false;
4011 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4012 /* send a hotplug event */
4013 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4014 }
4015 }
4016 return -EINVAL;
4017}
4018
a4fc5ed6
KP
4019/*
4020 * According to DP spec
4021 * 5.1.2:
4022 * 1. Read DPCD
4023 * 2. Configure link according to Receiver Capabilities
4024 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4025 * 4. Check link status on receipt of hot-plug interrupt
4026 */
a5146200 4027static void
ea5b213a 4028intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4029{
5b215bcf 4030 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4031 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4032 u8 sink_irq_vector;
93f62dad 4033 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4034
5b215bcf
DA
4035 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4036
da63a9f2 4037 if (!intel_encoder->connectors_active)
d2b996ac 4038 return;
59cd09e1 4039
da63a9f2 4040 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4041 return;
4042
1a125d8a
ID
4043 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4044 return;
4045
92fd8fd1 4046 /* Try to read receiver status if the link appears to be up */
93f62dad 4047 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4048 return;
4049 }
4050
92fd8fd1 4051 /* Now read the DPCD to see if it's actually running */
26d61aad 4052 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4053 return;
4054 }
4055
a60f0e38
JB
4056 /* Try to read the source of the interrupt */
4057 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4058 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4059 /* Clear interrupt source */
9d1a1031
JN
4060 drm_dp_dpcd_writeb(&intel_dp->aux,
4061 DP_DEVICE_SERVICE_IRQ_VECTOR,
4062 sink_irq_vector);
a60f0e38
JB
4063
4064 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4065 intel_dp_handle_test_request(intel_dp);
4066 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4067 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4068 }
4069
1ffdff13 4070 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4071 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4072 intel_encoder->base.name);
33a34e4e
JB
4073 intel_dp_start_link_train(intel_dp);
4074 intel_dp_complete_link_train(intel_dp);
3ab9c637 4075 intel_dp_stop_link_train(intel_dp);
33a34e4e 4076 }
a4fc5ed6 4077}
a4fc5ed6 4078
caf9ab24 4079/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4080static enum drm_connector_status
26d61aad 4081intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4082{
caf9ab24 4083 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4084 uint8_t type;
4085
4086 if (!intel_dp_get_dpcd(intel_dp))
4087 return connector_status_disconnected;
4088
4089 /* if there's no downstream port, we're done */
4090 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4091 return connector_status_connected;
caf9ab24
AJ
4092
4093 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4094 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4095 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4096 uint8_t reg;
9d1a1031
JN
4097
4098 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4099 &reg, 1) < 0)
caf9ab24 4100 return connector_status_unknown;
9d1a1031 4101
23235177
AJ
4102 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4103 : connector_status_disconnected;
caf9ab24
AJ
4104 }
4105
4106 /* If no HPD, poke DDC gently */
0b99836f 4107 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4108 return connector_status_connected;
caf9ab24
AJ
4109
4110 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4111 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4112 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4113 if (type == DP_DS_PORT_TYPE_VGA ||
4114 type == DP_DS_PORT_TYPE_NON_EDID)
4115 return connector_status_unknown;
4116 } else {
4117 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4118 DP_DWN_STRM_PORT_TYPE_MASK;
4119 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4120 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4121 return connector_status_unknown;
4122 }
caf9ab24
AJ
4123
4124 /* Anything else is out of spec, warn and ignore */
4125 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4126 return connector_status_disconnected;
71ba9000
AJ
4127}
4128
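
The SINK_COUNT branch above leans on DP_GET_SINK_COUNT() because DPCD 0x200 splits the 7-bit count around the CP_READY flag: bits 5:0 hold the low part and bit 7 the top bit. A tiny sketch of that decode, with the macro body copied from drm_dp_helper.h and an arbitrary example readback:

#include <stdint.h>
#include <stdio.h>

/* DPCD 0x200: bits 5:0 = count low bits, bit 6 = CP_READY, bit 7 = count bit 6 */
#define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))

int main(void)
{
	uint8_t reg = 0x81;	/* example raw readback: bit 7 set, low bits = 1 */

	printf("sink count = %d\n", DP_GET_SINK_COUNT(reg));	/* prints 65 */
	return 0;
}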
d410b56d
CW
4129static enum drm_connector_status
4130edp_detect(struct intel_dp *intel_dp)
4131{
4132 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4133 enum drm_connector_status status;
4134
4135 status = intel_panel_detect(dev);
4136 if (status == connector_status_unknown)
4137 status = connector_status_connected;
4138
4139 return status;
4140}
4141
5eb08b69 4142static enum drm_connector_status
a9756bb5 4143ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4144{
30add22d 4145 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4146 struct drm_i915_private *dev_priv = dev->dev_private;
4147 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4148
1b469639
DL
4149 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4150 return connector_status_disconnected;
4151
26d61aad 4152 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4153}
4154
2a592bec
DA
4155static int g4x_digital_port_connected(struct drm_device *dev,
4156 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4157{
a4fc5ed6 4158 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4159 uint32_t bit;
5eb08b69 4160
232a6ee9
TP
4161 if (IS_VALLEYVIEW(dev)) {
4162 switch (intel_dig_port->port) {
4163 case PORT_B:
4164 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4165 break;
4166 case PORT_C:
4167 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4168 break;
4169 case PORT_D:
4170 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4171 break;
4172 default:
2a592bec 4173 return -EINVAL;
232a6ee9
TP
4174 }
4175 } else {
4176 switch (intel_dig_port->port) {
4177 case PORT_B:
4178 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4179 break;
4180 case PORT_C:
4181 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4182 break;
4183 case PORT_D:
4184 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4185 break;
4186 default:
2a592bec 4187 return -EINVAL;
232a6ee9 4188 }
a4fc5ed6
KP
4189 }
4190
10f76a38 4191 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4192 return 0;
4193 return 1;
4194}
4195
4196static enum drm_connector_status
4197g4x_dp_detect(struct intel_dp *intel_dp)
4198{
4199 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4200 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4201 int ret;
4202
4203 /* Can't disconnect eDP, but you can close the lid... */
4204 if (is_edp(intel_dp)) {
4205 enum drm_connector_status status;
4206
4207 status = intel_panel_detect(dev);
4208 if (status == connector_status_unknown)
4209 status = connector_status_connected;
4210 return status;
4211 }
4212
4213 ret = g4x_digital_port_connected(dev, intel_dig_port);
4214 if (ret == -EINVAL)
4215 return connector_status_unknown;
4216 else if (ret == 0)
a4fc5ed6
KP
4217 return connector_status_disconnected;
4218
26d61aad 4219 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4220}
4221
8c241fef 4222static struct edid *
beb60608 4223intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4224{
beb60608 4225 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4226
9cd300e0
JN
4227 /* use cached edid if we have one */
4228 if (intel_connector->edid) {
9cd300e0
JN
4229 /* invalid edid */
4230 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4231 return NULL;
4232
55e9edeb 4233 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4234 } else
4235 return drm_get_edid(&intel_connector->base,
4236 &intel_dp->aux.ddc);
4237}
8c241fef 4238
beb60608
CW
4239static void
4240intel_dp_set_edid(struct intel_dp *intel_dp)
4241{
4242 struct intel_connector *intel_connector = intel_dp->attached_connector;
4243 struct edid *edid;
8c241fef 4244
beb60608
CW
4245 edid = intel_dp_get_edid(intel_dp);
4246 intel_connector->detect_edid = edid;
4247
4248 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4249 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4250 else
4251 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4252}
4253
beb60608
CW
4254static void
4255intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4256{
beb60608 4257 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4258
beb60608
CW
4259 kfree(intel_connector->detect_edid);
4260 intel_connector->detect_edid = NULL;
9cd300e0 4261
beb60608
CW
4262 intel_dp->has_audio = false;
4263}
d6f24d0f 4264
beb60608
CW
4265static enum intel_display_power_domain
4266intel_dp_power_get(struct intel_dp *dp)
4267{
4268 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4269 enum intel_display_power_domain power_domain;
4270
4271 power_domain = intel_display_port_power_domain(encoder);
4272 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4273
4274 return power_domain;
4275}
d6f24d0f 4276
beb60608
CW
4277static void
4278intel_dp_power_put(struct intel_dp *dp,
4279 enum intel_display_power_domain power_domain)
4280{
4281 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4282 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4283}
4284
a9756bb5
ZW
4285static enum drm_connector_status
4286intel_dp_detect(struct drm_connector *connector, bool force)
4287{
4288 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4289 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4290 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4291 struct drm_device *dev = connector->dev;
a9756bb5 4292 enum drm_connector_status status;
671dedd2 4293 enum intel_display_power_domain power_domain;
0e32b39c 4294 bool ret;
a9756bb5 4295
164c8598 4296 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4297 connector->base.id, connector->name);
beb60608 4298 intel_dp_unset_edid(intel_dp);
164c8598 4299
0e32b39c
DA
4300 if (intel_dp->is_mst) {
4301 /* MST devices are disconnected from a monitor POV */
4302 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4303 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4304 return connector_status_disconnected;
0e32b39c
DA
4305 }
4306
beb60608 4307 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4308
d410b56d
CW
4309 /* Can't disconnect eDP, but you can close the lid... */
4310 if (is_edp(intel_dp))
4311 status = edp_detect(intel_dp);
4312 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4313 status = ironlake_dp_detect(intel_dp);
4314 else
4315 status = g4x_dp_detect(intel_dp);
4316 if (status != connector_status_connected)
c8c8fb33 4317 goto out;
a9756bb5 4318
0d198328
AJ
4319 intel_dp_probe_oui(intel_dp);
4320
0e32b39c
DA
4321 ret = intel_dp_probe_mst(intel_dp);
4322 if (ret) {
4323 /* if we are in MST mode then this connector
4324 won't appear connected or have anything with EDID on it */
4325 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4326 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4327 status = connector_status_disconnected;
4328 goto out;
4329 }
4330
beb60608 4331 intel_dp_set_edid(intel_dp);
a9756bb5 4332
d63885da
PZ
4333 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4334 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4335 status = connector_status_connected;
4336
4337out:
beb60608 4338 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4339 return status;
a4fc5ed6
KP
4340}
4341
beb60608
CW
4342static void
4343intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4344{
df0e9248 4345 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4346 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4347 enum intel_display_power_domain power_domain;
a4fc5ed6 4348
beb60608
CW
4349 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4350 connector->base.id, connector->name);
4351 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4352
beb60608
CW
4353 if (connector->status != connector_status_connected)
4354 return;
671dedd2 4355
beb60608
CW
4356 power_domain = intel_dp_power_get(intel_dp);
4357
4358 intel_dp_set_edid(intel_dp);
4359
4360 intel_dp_power_put(intel_dp, power_domain);
4361
4362 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4363 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4364}
4365
4366static int intel_dp_get_modes(struct drm_connector *connector)
4367{
4368 struct intel_connector *intel_connector = to_intel_connector(connector);
4369 struct edid *edid;
4370
4371 edid = intel_connector->detect_edid;
4372 if (edid) {
4373 int ret = intel_connector_update_modes(connector, edid);
4374 if (ret)
4375 return ret;
4376 }
32f9d658 4377
f8779fda 4378 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4379 if (is_edp(intel_attached_dp(connector)) &&
4380 intel_connector->panel.fixed_mode) {
f8779fda 4381 struct drm_display_mode *mode;
beb60608
CW
4382
4383 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4384 intel_connector->panel.fixed_mode);
f8779fda 4385 if (mode) {
32f9d658
ZW
4386 drm_mode_probed_add(connector, mode);
4387 return 1;
4388 }
4389 }
beb60608 4390
32f9d658 4391 return 0;
a4fc5ed6
KP
4392}
4393
1aad7ac0
CW
4394static bool
4395intel_dp_detect_audio(struct drm_connector *connector)
4396{
1aad7ac0 4397 bool has_audio = false;
beb60608 4398 struct edid *edid;
1aad7ac0 4399
beb60608
CW
4400 edid = to_intel_connector(connector)->detect_edid;
4401 if (edid)
1aad7ac0 4402 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4403
1aad7ac0
CW
4404 return has_audio;
4405}
4406
f684960e
CW
4407static int
4408intel_dp_set_property(struct drm_connector *connector,
4409 struct drm_property *property,
4410 uint64_t val)
4411{
e953fd7b 4412 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4413 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4414 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4415 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4416 int ret;
4417
662595df 4418 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4419 if (ret)
4420 return ret;
4421
3f43c48d 4422 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4423 int i = val;
4424 bool has_audio;
4425
4426 if (i == intel_dp->force_audio)
f684960e
CW
4427 return 0;
4428
1aad7ac0 4429 intel_dp->force_audio = i;
f684960e 4430
c3e5f67b 4431 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4432 has_audio = intel_dp_detect_audio(connector);
4433 else
c3e5f67b 4434 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4435
4436 if (has_audio == intel_dp->has_audio)
f684960e
CW
4437 return 0;
4438
1aad7ac0 4439 intel_dp->has_audio = has_audio;
f684960e
CW
4440 goto done;
4441 }
4442
e953fd7b 4443 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4444 bool old_auto = intel_dp->color_range_auto;
4445 uint32_t old_range = intel_dp->color_range;
4446
55bc60db
VS
4447 switch (val) {
4448 case INTEL_BROADCAST_RGB_AUTO:
4449 intel_dp->color_range_auto = true;
4450 break;
4451 case INTEL_BROADCAST_RGB_FULL:
4452 intel_dp->color_range_auto = false;
4453 intel_dp->color_range = 0;
4454 break;
4455 case INTEL_BROADCAST_RGB_LIMITED:
4456 intel_dp->color_range_auto = false;
4457 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4458 break;
4459 default:
4460 return -EINVAL;
4461 }
ae4edb80
DV
4462
4463 if (old_auto == intel_dp->color_range_auto &&
4464 old_range == intel_dp->color_range)
4465 return 0;
4466
e953fd7b
CW
4467 goto done;
4468 }
4469
53b41837
YN
4470 if (is_edp(intel_dp) &&
4471 property == connector->dev->mode_config.scaling_mode_property) {
4472 if (val == DRM_MODE_SCALE_NONE) {
4473 DRM_DEBUG_KMS("no scaling not supported\n");
4474 return -EINVAL;
4475 }
4476
4477 if (intel_connector->panel.fitting_mode == val) {
4478 /* the eDP scaling property is not changed */
4479 return 0;
4480 }
4481 intel_connector->panel.fitting_mode = val;
4482
4483 goto done;
4484 }
4485
f684960e
CW
4486 return -EINVAL;
4487
4488done:
c0c36b94
CW
4489 if (intel_encoder->base.crtc)
4490 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4491
4492 return 0;
4493}
4494
a4fc5ed6 4495static void
73845adf 4496intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4497{
1d508706 4498 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4499
10e972d3 4500 kfree(intel_connector->detect_edid);
beb60608 4501
9cd300e0
JN
4502 if (!IS_ERR_OR_NULL(intel_connector->edid))
4503 kfree(intel_connector->edid);
4504
acd8db10
PZ
4505 /* Can't call is_edp() since the encoder may have been destroyed
4506 * already. */
4507 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4508 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4509
a4fc5ed6 4510 drm_connector_cleanup(connector);
55f78c43 4511 kfree(connector);
a4fc5ed6
KP
4512}
4513
00c09d70 4514void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4515{
da63a9f2
PZ
4516 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4517 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4518
4f71d0cb 4519 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4520 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4521 if (is_edp(intel_dp)) {
4522 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4523 /*
4524 * vdd might still be enabled due to the delayed vdd off.
4525 * Make sure vdd is actually turned off here.
4526 */
773538e8 4527 pps_lock(intel_dp);
4be73780 4528 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4529 pps_unlock(intel_dp);
4530
01527b31
CT
4531 if (intel_dp->edp_notifier.notifier_call) {
4532 unregister_reboot_notifier(&intel_dp->edp_notifier);
4533 intel_dp->edp_notifier.notifier_call = NULL;
4534 }
bd943159 4535 }
c8bd0e49 4536 drm_encoder_cleanup(encoder);
da63a9f2 4537 kfree(intel_dig_port);
24d05927
DV
4538}
4539
07f9cd0b
ID
4540static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4541{
4542 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4543
4544 if (!is_edp(intel_dp))
4545 return;
4546
951468f3
VS
4547 /*
4548 * vdd might still be enabled do to the delayed vdd off.
4549 * Make sure vdd is actually turned off here.
4550 */
afa4e53a 4551 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4552 pps_lock(intel_dp);
07f9cd0b 4553 edp_panel_vdd_off_sync(intel_dp);
773538e8 4554 pps_unlock(intel_dp);
07f9cd0b
ID
4555}
4556
49e6bc51
VS
4557static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4558{
4559 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4560 struct drm_device *dev = intel_dig_port->base.base.dev;
4561 struct drm_i915_private *dev_priv = dev->dev_private;
4562 enum intel_display_power_domain power_domain;
4563
4564 lockdep_assert_held(&dev_priv->pps_mutex);
4565
4566 if (!edp_have_panel_vdd(intel_dp))
4567 return;
4568
4569 /*
4570 * The VDD bit needs a power domain reference, so if the bit is
4571 * already enabled when we boot or resume, grab this reference and
4572 * schedule a vdd off, so we don't hold on to the reference
4573 * indefinitely.
4574 */
4575 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4576 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4577 intel_display_power_get(dev_priv, power_domain);
4578
4579 edp_panel_vdd_schedule_off(intel_dp);
4580}
4581
6d93c0c4
ID
4582static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4583{
49e6bc51
VS
4584 struct intel_dp *intel_dp;
4585
4586 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4587 return;
4588
4589 intel_dp = enc_to_intel_dp(encoder);
4590
4591 pps_lock(intel_dp);
4592
4593 /*
4594 * Read out the current power sequencer assignment,
4595 * in case the BIOS did something with it.
4596 */
4597 if (IS_VALLEYVIEW(encoder->dev))
4598 vlv_initial_power_sequencer_setup(intel_dp);
4599
4600 intel_edp_panel_vdd_sanitize(intel_dp);
4601
4602 pps_unlock(intel_dp);
6d93c0c4
ID
4603}
4604
a4fc5ed6 4605static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4606 .dpms = intel_connector_dpms,
a4fc5ed6 4607 .detect = intel_dp_detect,
beb60608 4608 .force = intel_dp_force,
a4fc5ed6 4609 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4610 .set_property = intel_dp_set_property,
2545e4a6 4611 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4612 .destroy = intel_dp_connector_destroy,
c6f95f27 4613 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4614 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4615};
4616
4617static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4618 .get_modes = intel_dp_get_modes,
4619 .mode_valid = intel_dp_mode_valid,
df0e9248 4620 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4621};
4622
a4fc5ed6 4623static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4624 .reset = intel_dp_encoder_reset,
24d05927 4625 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4626};
4627
0e32b39c 4628void
21d40d37 4629intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4630{
0e32b39c 4631 return;
c8110e52 4632}
6207937d 4633
b2c5c181 4634enum irqreturn
13cf5504
DA
4635intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4636{
4637 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4638 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4639 struct drm_device *dev = intel_dig_port->base.base.dev;
4640 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4641 enum intel_display_power_domain power_domain;
b2c5c181 4642 enum irqreturn ret = IRQ_NONE;
1c767b33 4643
0e32b39c
DA
4644 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4645 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4646
7a7f84cc
VS
4647 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4648 /*
4649 * vdd off can generate a long pulse on eDP which
4650 * would require vdd on to handle it, and thus we
4651 * would end up in an endless cycle of
4652 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4653 */
4654 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4655 port_name(intel_dig_port->port));
a8b3d52f 4656 return IRQ_HANDLED;
7a7f84cc
VS
4657 }
4658
26fbb774
VS
4659 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4660 port_name(intel_dig_port->port),
0e32b39c 4661 long_hpd ? "long" : "short");
13cf5504 4662
1c767b33
ID
4663 power_domain = intel_display_port_power_domain(intel_encoder);
4664 intel_display_power_get(dev_priv, power_domain);
4665
0e32b39c 4666 if (long_hpd) {
2a592bec
DA
4667
4668 if (HAS_PCH_SPLIT(dev)) {
4669 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4670 goto mst_fail;
4671 } else {
4672 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4673 goto mst_fail;
4674 }
0e32b39c
DA
4675
4676 if (!intel_dp_get_dpcd(intel_dp)) {
4677 goto mst_fail;
4678 }
4679
4680 intel_dp_probe_oui(intel_dp);
4681
4682 if (!intel_dp_probe_mst(intel_dp))
4683 goto mst_fail;
4684
4685 } else {
4686 if (intel_dp->is_mst) {
1c767b33 4687 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4688 goto mst_fail;
4689 }
4690
4691 if (!intel_dp->is_mst) {
4692 /*
4693 * we'll check the link status via the normal hot plug path later -
4694 * but for short hpds we should check it now
4695 */
5b215bcf 4696 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4697 intel_dp_check_link_status(intel_dp);
5b215bcf 4698 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4699 }
4700 }
b2c5c181
DV
4701
4702 ret = IRQ_HANDLED;
4703
1c767b33 4704 goto put_power;
0e32b39c
DA
4705mst_fail:
4706 /* if we were in MST mode, and device is not there get out of MST mode */
4707 if (intel_dp->is_mst) {
4708 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4709 intel_dp->is_mst = false;
4710 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4711 }
1c767b33
ID
4712put_power:
4713 intel_display_power_put(dev_priv, power_domain);
4714
4715 return ret;
13cf5504
DA
4716}
4717
e3421a18
ZW
4718/* Return which DP Port should be selected for Transcoder DP control */
4719int
0206e353 4720intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4721{
4722 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4723 struct intel_encoder *intel_encoder;
4724 struct intel_dp *intel_dp;
e3421a18 4725
fa90ecef
PZ
4726 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4727 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4728
fa90ecef
PZ
4729 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4730 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4731 return intel_dp->output_reg;
e3421a18 4732 }
ea5b213a 4733
e3421a18
ZW
4734 return -1;
4735}
4736
36e83a18 4737/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4738bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4739{
4740 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4741 union child_device_config *p_child;
36e83a18 4742 int i;
5d8a7752
VS
4743 static const short port_mapping[] = {
4744 [PORT_B] = PORT_IDPB,
4745 [PORT_C] = PORT_IDPC,
4746 [PORT_D] = PORT_IDPD,
4747 };
36e83a18 4748
3b32a35b
VS
4749 if (port == PORT_A)
4750 return true;
4751
41aa3448 4752 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4753 return false;
4754
41aa3448
RV
4755 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4756 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4757
5d8a7752 4758 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4759 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4760 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4761 return true;
4762 }
4763 return false;
4764}
4765
0e32b39c 4766void
f684960e
CW
4767intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4768{
53b41837
YN
4769 struct intel_connector *intel_connector = to_intel_connector(connector);
4770
3f43c48d 4771 intel_attach_force_audio_property(connector);
e953fd7b 4772 intel_attach_broadcast_rgb_property(connector);
55bc60db 4773 intel_dp->color_range_auto = true;
53b41837
YN
4774
4775 if (is_edp(intel_dp)) {
4776 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4777 drm_object_attach_property(
4778 &connector->base,
53b41837 4779 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4780 DRM_MODE_SCALE_ASPECT);
4781 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4782 }
f684960e
CW
4783}
4784
dada1a9f
ID
4785static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4786{
4787 intel_dp->last_power_cycle = jiffies;
4788 intel_dp->last_power_on = jiffies;
4789 intel_dp->last_backlight_off = jiffies;
4790}
4791
67a54566
DV
4792static void
4793intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4794 struct intel_dp *intel_dp)
67a54566
DV
4795{
4796 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4797 struct edp_power_seq cur, vbt, spec,
4798 *final = &intel_dp->pps_delays;
67a54566 4799 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4800 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4801
e39b999a
VS
4802 lockdep_assert_held(&dev_priv->pps_mutex);
4803
81ddbc69
VS
4804 /* already initialized? */
4805 if (final->t11_t12 != 0)
4806 return;
4807
453c5420 4808 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4809 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4810 pp_on_reg = PCH_PP_ON_DELAYS;
4811 pp_off_reg = PCH_PP_OFF_DELAYS;
4812 pp_div_reg = PCH_PP_DIVISOR;
4813 } else {
bf13e81b
JN
4814 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4815
4816 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4817 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4818 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4819 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4820 }
67a54566
DV
4821
4822 /* Workaround: Need to write PP_CONTROL with the unlock key as
4823 * the very first thing. */
453c5420 4824 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4825 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4826
453c5420
JB
4827 pp_on = I915_READ(pp_on_reg);
4828 pp_off = I915_READ(pp_off_reg);
4829 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4830
4831 /* Pull timing values out of registers */
4832 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4833 PANEL_POWER_UP_DELAY_SHIFT;
4834
4835 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4836 PANEL_LIGHT_ON_DELAY_SHIFT;
4837
4838 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4839 PANEL_LIGHT_OFF_DELAY_SHIFT;
4840
4841 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4842 PANEL_POWER_DOWN_DELAY_SHIFT;
4843
4844 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4845 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4846
4847 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4848 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4849
41aa3448 4850 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4851
4852 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4853 * our hw here, which are all in 100usec. */
4854 spec.t1_t3 = 210 * 10;
4855 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4856 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4857 spec.t10 = 500 * 10;
4858 /* This one is special and actually in units of 100ms, but zero
4859 * based in the hw (so we need to add 100 ms). But the sw vbt
4860 * table multiplies it by 1000 to make it in units of 100usec,
4861 * too. */
4862 spec.t11_t12 = (510 + 100) * 10;
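	/*
	 * For example: the 510 ms spec limit, plus the 100 ms hidden by the
	 * zero-based hw field, is 610 ms, which in the 100 us units used
	 * here becomes (510 + 100) * 10 = 6100.
	 */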
4863
4864 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4865 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4866
4867 /* Use the max of the register settings and vbt. If both are
4868 * unset, fall back to the spec limits. */
36b5f425 4869#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4870 spec.field : \
4871 max(cur.field, vbt.field))
4872 assign_final(t1_t3);
4873 assign_final(t8);
4874 assign_final(t9);
4875 assign_final(t10);
4876 assign_final(t11_t12);
4877#undef assign_final
4878
36b5f425 4879#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4880 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4881 intel_dp->backlight_on_delay = get_delay(t8);
4882 intel_dp->backlight_off_delay = get_delay(t9);
4883 intel_dp->panel_power_down_delay = get_delay(t10);
4884 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4885#undef get_delay
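	/*
	 * Worked example of the merge above, with made-up values: if the
	 * registers report cur.t1_t3 = 2100 and the VBT has vbt.t1_t3 = 0,
	 * the max wins and final->t1_t3 = 2100 (100 us units); if both are
	 * zero for t10, the spec limit is used and final->t10 = 5000.
	 * get_delay() then converts to ms, e.g. panel_power_up_delay =
	 * DIV_ROUND_UP(2100, 10) = 210.
	 */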
4886
f30d26e4
JN
4887 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4888 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4889 intel_dp->panel_power_cycle_delay);
4890
4891 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4892 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4893}
4894
4895static void
4896intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4897 struct intel_dp *intel_dp)
f30d26e4
JN
4898{
4899 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4900 u32 pp_on, pp_off, pp_div, port_sel = 0;
4901 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4902 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4903 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4904 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4905
e39b999a 4906 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4907
4908 if (HAS_PCH_SPLIT(dev)) {
4909 pp_on_reg = PCH_PP_ON_DELAYS;
4910 pp_off_reg = PCH_PP_OFF_DELAYS;
4911 pp_div_reg = PCH_PP_DIVISOR;
4912 } else {
bf13e81b
JN
4913 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4914
4915 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4916 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4917 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4918 }
4919
b2f19d1a
PZ
4920 /*
4921 * And finally store the new values in the power sequencer. The
4922 * backlight delays are set to 1 because we do manual waits on them. For
4923 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4924 * we'll end up waiting for the backlight off delay twice: once when we
4925 * do the manual sleep, and once when we disable the panel and wait for
4926 * the PP_STATUS bit to become zero.
4927 */
f30d26e4 4928 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4929 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4930 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4931 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4932 /* Compute the divisor for the pp clock, simply match the Bspec
4933 * formula. */
453c5420 4934 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4935 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4936 << PANEL_POWER_CYCLE_DELAY_SHIFT);
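	/*
	 * For example, a 610 ms power cycle delay arrives here as
	 * t11_t12 = 6100 (100 us units); DIV_ROUND_UP(6100, 1000) = 7 is
	 * then programmed into the PANEL_POWER_CYCLE_DELAY field, which
	 * counts in 100 ms steps.
	 */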
4937
4938 /* Haswell doesn't have any port selection bits for the panel
4939 * power sequencer any more. */
bc7d38a4 4940 if (IS_VALLEYVIEW(dev)) {
ad933b56 4941 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4942 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4943 if (port == PORT_A)
a24c144c 4944 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4945 else
a24c144c 4946 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4947 }
4948
453c5420
JB
4949 pp_on |= port_sel;
4950
4951 I915_WRITE(pp_on_reg, pp_on);
4952 I915_WRITE(pp_off_reg, pp_off);
4953 I915_WRITE(pp_div_reg, pp_div);
67a54566 4954
67a54566 4955 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4956 I915_READ(pp_on_reg),
4957 I915_READ(pp_off_reg),
4958 I915_READ(pp_div_reg));
f684960e
CW
4959}
4960
b33a2815
VK
4961/**
4962 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4963 * @dev: DRM device
4964 * @refresh_rate: RR to be programmed
4965 *
4966 * This function gets called when refresh rate (RR) has to be changed from
4967 * one frequency to another. Switches can be between high and low RR
4968 * supported by the panel or to any other RR based on media playback (in
4969 * this case, RR value needs to be passed from user space).
4970 *
4971 * The caller of this function needs to hold dev_priv->drrs.mutex.
4972 */
96178eeb 4973static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4974{
4975 struct drm_i915_private *dev_priv = dev->dev_private;
4976 struct intel_encoder *encoder;
96178eeb
VK
4977 struct intel_digital_port *dig_port = NULL;
4978 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4979 struct intel_crtc_state *config = NULL;
439d7ac0 4980 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4981 u32 reg, val;
96178eeb 4982 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4983
4984 if (refresh_rate <= 0) {
4985 DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
4986 return;
4987 }
4988
96178eeb
VK
4989 if (intel_dp == NULL) {
4990 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4991 return;
4992 }
4993
1fcc9d1c 4994 /*
e4d59f6b
RV
4995 * FIXME: This needs proper synchronization with psr state for some
4996 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4997 */
439d7ac0 4998
96178eeb
VK
4999 dig_port = dp_to_dig_port(intel_dp);
5000 encoder = &dig_port->base;
723f9aab 5001 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5002
5003 if (!intel_crtc) {
5004 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5005 return;
5006 }
5007
6e3c9717 5008 config = intel_crtc->config;
439d7ac0 5009
96178eeb 5010 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5011 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5012 return;
5013 }
5014
96178eeb
VK
5015 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5016 refresh_rate)
439d7ac0
PB
5017 index = DRRS_LOW_RR;
5018
96178eeb 5019 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5020 DRM_DEBUG_KMS(
5021 "DRRS requested for previously set RR...ignoring\n");
5022 return;
5023 }
5024
5025 if (!intel_crtc->active) {
5026 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5027 return;
5028 }
5029
44395bfe 5030 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5031 switch (index) {
5032 case DRRS_HIGH_RR:
5033 intel_dp_set_m_n(intel_crtc, M1_N1);
5034 break;
5035 case DRRS_LOW_RR:
5036 intel_dp_set_m_n(intel_crtc, M2_N2);
5037 break;
5038 case DRRS_MAX_RR:
5039 default:
5040 DRM_ERROR("Unsupported refresh rate type\n");
5041 }
5042 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5043 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5044 val = I915_READ(reg);
a4c30b1d 5045
439d7ac0 5046 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5047 if (IS_VALLEYVIEW(dev))
5048 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5049 else
5050 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5051 } else {
6fa7aec1
VK
5052 if (IS_VALLEYVIEW(dev))
5053 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5054 else
5055 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5056 }
5057 I915_WRITE(reg, val);
5058 }
5059
4e9ac947
VK
5060 dev_priv->drrs.refresh_rate_type = index;
5061
5062 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5063}
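/*
 * To summarize the two paths above: gen8+ (other than CHV) switches the
 * refresh rate by selecting between the M1/N1 and M2/N2 link values, while
 * gen7 and CHV toggle the EDP_RR_MODE_SWITCH bit in PIPECONF.
 */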
5064
b33a2815
VK
5065/**
5066 * intel_edp_drrs_enable - init drrs struct if supported
5067 * @intel_dp: DP struct
5068 *
5069 * Initializes frontbuffer_bits and drrs.dp
5070 */
c395578e
VK
5071void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5072{
5073 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5074 struct drm_i915_private *dev_priv = dev->dev_private;
5075 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5076 struct drm_crtc *crtc = dig_port->base.base.crtc;
5077 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5078
5079 if (!intel_crtc->config->has_drrs) {
5080 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5081 return;
5082 }
5083
5084 mutex_lock(&dev_priv->drrs.mutex);
5085 if (WARN_ON(dev_priv->drrs.dp)) {
5086 DRM_ERROR("DRRS already enabled\n");
5087 goto unlock;
5088 }
5089
5090 dev_priv->drrs.busy_frontbuffer_bits = 0;
5091
5092 dev_priv->drrs.dp = intel_dp;
5093
5094unlock:
5095 mutex_unlock(&dev_priv->drrs.mutex);
5096}
5097
b33a2815
VK
5098/**
5099 * intel_edp_drrs_disable - Disable DRRS
5100 * @intel_dp: DP struct
5101 *
5102 */
c395578e
VK
5103void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5104{
5105 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5106 struct drm_i915_private *dev_priv = dev->dev_private;
5107 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5108 struct drm_crtc *crtc = dig_port->base.base.crtc;
5109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5110
5111 if (!intel_crtc->config->has_drrs)
5112 return;
5113
5114 mutex_lock(&dev_priv->drrs.mutex);
5115 if (!dev_priv->drrs.dp) {
5116 mutex_unlock(&dev_priv->drrs.mutex);
5117 return;
5118 }
5119
5120 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5121 intel_dp_set_drrs_state(dev_priv->dev,
5122 intel_dp->attached_connector->panel.
5123 fixed_mode->vrefresh);
5124
5125 dev_priv->drrs.dp = NULL;
5126 mutex_unlock(&dev_priv->drrs.mutex);
5127
5128 cancel_delayed_work_sync(&dev_priv->drrs.work);
5129}
5130
4e9ac947
VK
5131static void intel_edp_drrs_downclock_work(struct work_struct *work)
5132{
5133 struct drm_i915_private *dev_priv =
5134 container_of(work, typeof(*dev_priv), drrs.work.work);
5135 struct intel_dp *intel_dp;
5136
5137 mutex_lock(&dev_priv->drrs.mutex);
5138
5139 intel_dp = dev_priv->drrs.dp;
5140
5141 if (!intel_dp)
5142 goto unlock;
5143
439d7ac0 5144 /*
4e9ac947
VK
5145 * The delayed work can race with an invalidate hence we need to
5146 * recheck.
439d7ac0
PB
5147 */
5148
4e9ac947
VK
5149 if (dev_priv->drrs.busy_frontbuffer_bits)
5150 goto unlock;
439d7ac0 5151
4e9ac947
VK
5152 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5153 intel_dp_set_drrs_state(dev_priv->dev,
5154 intel_dp->attached_connector->panel.
5155 downclock_mode->vrefresh);
439d7ac0 5156
4e9ac947 5157unlock:
439d7ac0 5158
4e9ac947 5159 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5160}
5161
b33a2815
VK
5162/**
5163 * intel_edp_drrs_invalidate - Invalidate DRRS
5164 * @dev: DRM device
5165 * @frontbuffer_bits: frontbuffer plane tracking bits
5166 *
5167 * When there is a disturbance on screen (due to cursor movement/time
5168 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
5169 * the high RR.
5170 *
5171 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5172 */
a93fad0f
VK
5173void intel_edp_drrs_invalidate(struct drm_device *dev,
5174 unsigned frontbuffer_bits)
5175{
5176 struct drm_i915_private *dev_priv = dev->dev_private;
5177 struct drm_crtc *crtc;
5178 enum pipe pipe;
5179
5180 if (!dev_priv->drrs.dp)
5181 return;
5182
3954e733
R
5183 cancel_delayed_work_sync(&dev_priv->drrs.work);
5184
a93fad0f
VK
5185 mutex_lock(&dev_priv->drrs.mutex);
5186 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5187 pipe = to_intel_crtc(crtc)->pipe;
5188
5189 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5190 intel_dp_set_drrs_state(dev_priv->dev,
5191 dev_priv->drrs.dp->attached_connector->panel.
5192 fixed_mode->vrefresh);
5193 }
5194
5195 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5196
5197 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5198 mutex_unlock(&dev_priv->drrs.mutex);
5199}
5200
b33a2815
VK
5201/**
5202 * intel_edp_drrs_flush - Flush DRRS
5203 * @dev: DRM device
5204 * @frontbuffer_bits: frontbuffer plane tracking bits
5205 *
5206 * When there is no movement on screen, DRRS work can be scheduled.
5207 * This DRRS work is responsible for setting relevant registers after a
5208 * timeout of 1 second.
5209 *
5210 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5211 */
a93fad0f
VK
5212void intel_edp_drrs_flush(struct drm_device *dev,
5213 unsigned frontbuffer_bits)
5214{
5215 struct drm_i915_private *dev_priv = dev->dev_private;
5216 struct drm_crtc *crtc;
5217 enum pipe pipe;
5218
5219 if (!dev_priv->drrs.dp)
5220 return;
5221
3954e733
R
5222 cancel_delayed_work_sync(&dev_priv->drrs.work);
5223
a93fad0f
VK
5224 mutex_lock(&dev_priv->drrs.mutex);
5225 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5226 pipe = to_intel_crtc(crtc)->pipe;
5227 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5228
a93fad0f
VK
5229 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5230 !dev_priv->drrs.busy_frontbuffer_bits)
5231 schedule_delayed_work(&dev_priv->drrs.work,
5232 msecs_to_jiffies(1000));
5233 mutex_unlock(&dev_priv->drrs.mutex);
5234}
5235
b33a2815
VK
5236/**
5237 * DOC: Display Refresh Rate Switching (DRRS)
5238 *
5239 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5240 * which enables switching between low and high refresh rates,
5241 * dynamically, based on the usage scenario. This feature is applicable
5242 * for internal panels.
5243 *
5244 * Indication that the panel supports DRRS is given by the panel EDID, which
5245 * would list multiple refresh rates for one resolution.
5246 *
5247 * DRRS is of 2 types - static and seamless.
5248 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5249 * (may appear as a blink on screen) and is used in dock-undock scenario.
5250 * Seamless DRRS involves changing RR without any visual effect to the user
5251 * and can be used during normal system usage. This is done by programming
5252 * certain registers.
5253 *
5254 * Support for static/seamless DRRS may be indicated in the VBT based on
5255 * inputs from the panel spec.
5256 *
5257 * DRRS saves power by switching to low RR based on usage scenarios.
5258 *
5259 * eDP DRRS:-
5260 * The implementation is based on frontbuffer tracking.
5261 * When there is a disturbance on the screen triggered by user activity or a
5262 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5263 * When there is no movement on screen, after a timeout of 1 second, a switch
5264 * to low RR is made.
5265 * For integration with frontbuffer tracking code,
5266 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5267 *
5268 * DRRS can be further extended to support other internal panels and also
5269 * the scenario of video playback wherein RR is set based on the rate
5270 * requested by userspace.
5271 */
5272
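/*
 * A minimal sketch (hypothetical callers, for illustration only) of how the
 * entry points described above are meant to be driven by the frontbuffer
 * tracking code: report dirtying immediately, and report the flush once
 * rendering settles so the 1 second downclock timer can run.
 */
#if 0
static void example_frontbuffer_dirty(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* screen activity: force the panel back to the high refresh rate */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

static void example_frontbuffer_flush(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* activity settled: arm the delayed switch back to the low refresh rate */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
#endif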
5273/**
5274 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5275 * @intel_connector: eDP connector
5276 * @fixed_mode: preferred mode of panel
5277 *
5278 * This function is called only once at driver load to initialize basic
5279 * DRRS state (the delayed work item and mutex).
5280 *
5281 * Returns:
5282 * Downclock mode if panel supports it, else NULL.
5283 * DRRS support is determined by the presence of downclock mode (apart
5284 * from VBT setting).
5285 */
4f9db5b5 5286static struct drm_display_mode *
96178eeb
VK
5287intel_dp_drrs_init(struct intel_connector *intel_connector,
5288 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5289{
5290 struct drm_connector *connector = &intel_connector->base;
96178eeb 5291 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5292 struct drm_i915_private *dev_priv = dev->dev_private;
5293 struct drm_display_mode *downclock_mode = NULL;
5294
5295 if (INTEL_INFO(dev)->gen <= 6) {
5296 DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5297 return NULL;
5298 }
5299
5300 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5301 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5302 return NULL;
5303 }
5304
5305 downclock_mode = intel_find_panel_downclock
5306 (dev, fixed_mode, connector);
5307
5308 if (!downclock_mode) {
a1d26342 5309 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
4f9db5b5
PB
5310 return NULL;
5311 }
5312
4e9ac947
VK
5313 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5314
96178eeb 5315 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5316
96178eeb 5317 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5318
96178eeb 5319 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5320 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5321 return downclock_mode;
5322}
5323
ed92f0b2 5324static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5325 struct intel_connector *intel_connector)
ed92f0b2
PZ
5326{
5327 struct drm_connector *connector = &intel_connector->base;
5328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5329 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5330 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5331 struct drm_i915_private *dev_priv = dev->dev_private;
5332 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5333 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5334 bool has_dpcd;
5335 struct drm_display_mode *scan;
5336 struct edid *edid;
6517d273 5337 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5338
5339 if (!is_edp(intel_dp))
5340 return true;
5341
49e6bc51
VS
5342 pps_lock(intel_dp);
5343 intel_edp_panel_vdd_sanitize(intel_dp);
5344 pps_unlock(intel_dp);
63635217 5345
ed92f0b2 5346 /* Cache DPCD and EDID for edp. */
ed92f0b2 5347 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5348
5349 if (has_dpcd) {
5350 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5351 dev_priv->no_aux_handshake =
5352 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5353 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5354 } else {
5355 /* if this fails, presume the device is a ghost */
5356 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5357 return false;
5358 }
5359
5360 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5361 pps_lock(intel_dp);
36b5f425 5362 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5363 pps_unlock(intel_dp);
ed92f0b2 5364
060c8778 5365 mutex_lock(&dev->mode_config.mutex);
0b99836f 5366 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5367 if (edid) {
5368 if (drm_add_edid_modes(connector, edid)) {
5369 drm_mode_connector_update_edid_property(connector,
5370 edid);
5371 drm_edid_to_eld(connector, edid);
5372 } else {
5373 kfree(edid);
5374 edid = ERR_PTR(-EINVAL);
5375 }
5376 } else {
5377 edid = ERR_PTR(-ENOENT);
5378 }
5379 intel_connector->edid = edid;
5380
5381 /* prefer fixed mode from EDID if available */
5382 list_for_each_entry(scan, &connector->probed_modes, head) {
5383 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5384 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5385 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5386 intel_connector, fixed_mode);
ed92f0b2
PZ
5387 break;
5388 }
5389 }
5390
5391 /* fallback to VBT if available for eDP */
5392 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5393 fixed_mode = drm_mode_duplicate(dev,
5394 dev_priv->vbt.lfp_lvds_vbt_mode);
5395 if (fixed_mode)
5396 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5397 }
060c8778 5398 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5399
01527b31
CT
5400 if (IS_VALLEYVIEW(dev)) {
5401 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5402 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5403
5404 /*
5405 * Figure out the current pipe for the initial backlight setup.
5406 * If the current pipe isn't valid, try the PPS pipe, and if that
5407 * fails just assume pipe A.
5408 */
5409 if (IS_CHERRYVIEW(dev))
5410 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5411 else
5412 pipe = PORT_TO_PIPE(intel_dp->DP);
5413
5414 if (pipe != PIPE_A && pipe != PIPE_B)
5415 pipe = intel_dp->pps_pipe;
5416
5417 if (pipe != PIPE_A && pipe != PIPE_B)
5418 pipe = PIPE_A;
5419
5420 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5421 pipe_name(pipe));
01527b31
CT
5422 }
5423
4f9db5b5 5424 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5425 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5426 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5427
5428 return true;
5429}
5430
16c25533 5431bool
f0fec3f2
PZ
5432intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5433 struct intel_connector *intel_connector)
a4fc5ed6 5434{
f0fec3f2
PZ
5435 struct drm_connector *connector = &intel_connector->base;
5436 struct intel_dp *intel_dp = &intel_dig_port->dp;
5437 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5438 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5439 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5440 enum port port = intel_dig_port->port;
0b99836f 5441 int type;
a4fc5ed6 5442
a4a5d2f8
VS
5443 intel_dp->pps_pipe = INVALID_PIPE;
5444
ec5b01dd 5445 /* intel_dp vfuncs */
b6b5e383
DL
5446 if (INTEL_INFO(dev)->gen >= 9)
5447 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5448 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5449 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5450 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5451 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5452 else if (HAS_PCH_SPLIT(dev))
5453 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5454 else
5455 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5456
b9ca5fad
DL
5457 if (INTEL_INFO(dev)->gen >= 9)
5458 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5459 else
5460 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5461
0767935e
DV
5462 /* Preserve the current hw state. */
5463 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5464 intel_dp->attached_connector = intel_connector;
3d3dc149 5465
3b32a35b 5466 if (intel_dp_is_edp(dev, port))
b329530c 5467 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5468 else
5469 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5470
f7d24902
ID
5471 /*
5472 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5473 * for DP the encoder type can be set by the caller to
5474 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5475 */
5476 if (type == DRM_MODE_CONNECTOR_eDP)
5477 intel_encoder->type = INTEL_OUTPUT_EDP;
5478
c17ed5b5
VS
5479 /* eDP only on port B and/or C on vlv/chv */
5480 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5481 port != PORT_B && port != PORT_C))
5482 return false;
5483
e7281eab
ID
5484 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5485 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5486 port_name(port));
5487
b329530c 5488 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5489 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5490
a4fc5ed6
KP
5491 connector->interlace_allowed = true;
5492 connector->doublescan_allowed = 0;
5493
f0fec3f2 5494 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5495 edp_panel_vdd_work);
a4fc5ed6 5496
df0e9248 5497 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5498 drm_connector_register(connector);
a4fc5ed6 5499
affa9354 5500 if (HAS_DDI(dev))
bcbc889b
PZ
5501 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5502 else
5503 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5504 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5505
0b99836f 5506 /* Set up the hotplug pin. */
ab9d7c30
PZ
5507 switch (port) {
5508 case PORT_A:
1d843f9d 5509 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5510 break;
5511 case PORT_B:
1d843f9d 5512 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5513 break;
5514 case PORT_C:
1d843f9d 5515 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5516 break;
5517 case PORT_D:
1d843f9d 5518 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5519 break;
5520 default:
ad1c0b19 5521 BUG();
5eb08b69
ZW
5522 }
5523
dada1a9f 5524 if (is_edp(intel_dp)) {
773538e8 5525 pps_lock(intel_dp);
1e74a324
VS
5526 intel_dp_init_panel_power_timestamps(intel_dp);
5527 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5528 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5529 else
36b5f425 5530 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5531 pps_unlock(intel_dp);
dada1a9f 5532 }
0095e6dc 5533
9d1a1031 5534 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5535
0e32b39c 5536 /* init MST on ports that can support it */
c86ea3d0 5537 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5538 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5539 intel_dp_mst_encoder_init(intel_dig_port,
5540 intel_connector->base.base.id);
0e32b39c
DA
5541 }
5542 }
5543
36b5f425 5544 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5545 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5546 if (is_edp(intel_dp)) {
5547 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5548 /*
5549 * vdd might still be enabled due to the delayed vdd off.
5550 * Make sure vdd is actually turned off here.
5551 */
773538e8 5552 pps_lock(intel_dp);
4be73780 5553 edp_panel_vdd_off_sync(intel_dp);
773538e8 5554 pps_unlock(intel_dp);
15b1d171 5555 }
34ea3d38 5556 drm_connector_unregister(connector);
b2f246a8 5557 drm_connector_cleanup(connector);
16c25533 5558 return false;
b2f246a8 5559 }
32f9d658 5560
f684960e
CW
5561 intel_dp_add_properties(intel_dp, connector);
5562
a4fc5ed6
KP
5563 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5564 * 0xd. Failure to do so will result in spurious interrupts being
5565 * generated on the port when a cable is not attached.
5566 */
5567 if (IS_G4X(dev) && !IS_GM45(dev)) {
5568 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5569 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5570 }
16c25533
PZ
5571
5572 return true;
a4fc5ed6 5573}
f0fec3f2
PZ
5574
5575void
5576intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5577{
13cf5504 5578 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5579 struct intel_digital_port *intel_dig_port;
5580 struct intel_encoder *intel_encoder;
5581 struct drm_encoder *encoder;
5582 struct intel_connector *intel_connector;
5583
b14c5679 5584 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5585 if (!intel_dig_port)
5586 return;
5587
b14c5679 5588 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5589 if (!intel_connector) {
5590 kfree(intel_dig_port);
5591 return;
5592 }
5593
5594 intel_encoder = &intel_dig_port->base;
5595 encoder = &intel_encoder->base;
5596
5597 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5598 DRM_MODE_ENCODER_TMDS);
5599
5bfe2ac0 5600 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5601 intel_encoder->disable = intel_disable_dp;
00c09d70 5602 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5603 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5604 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5605 if (IS_CHERRYVIEW(dev)) {
9197c88b 5606 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5607 intel_encoder->pre_enable = chv_pre_enable_dp;
5608 intel_encoder->enable = vlv_enable_dp;
580d3811 5609 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5610 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5611 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5612 intel_encoder->pre_enable = vlv_pre_enable_dp;
5613 intel_encoder->enable = vlv_enable_dp;
49277c31 5614 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5615 } else {
ecff4f3b
JN
5616 intel_encoder->pre_enable = g4x_pre_enable_dp;
5617 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5618 if (INTEL_INFO(dev)->gen >= 5)
5619 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5620 }
f0fec3f2 5621
174edf1f 5622 intel_dig_port->port = port;
f0fec3f2
PZ
5623 intel_dig_port->dp.output_reg = output_reg;
5624
00c09d70 5625 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5626 if (IS_CHERRYVIEW(dev)) {
5627 if (port == PORT_D)
5628 intel_encoder->crtc_mask = 1 << 2;
5629 else
5630 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5631 } else {
5632 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5633 }
bc079e8b 5634 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5635 intel_encoder->hot_plug = intel_dp_hot_plug;
5636
13cf5504
DA
5637 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5638 dev_priv->hpd_irq_port[port] = intel_dig_port;
5639
15b1d171
PZ
5640 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5641 drm_encoder_cleanup(encoder);
5642 kfree(intel_dig_port);
b2f246a8 5643 kfree(intel_connector);
15b1d171 5644 }
f0fec3f2 5645}
0e32b39c
DA
5646
5647void intel_dp_mst_suspend(struct drm_device *dev)
5648{
5649 struct drm_i915_private *dev_priv = dev->dev_private;
5650 int i;
5651
5652 /* disable MST */
5653 for (i = 0; i < I915_MAX_PORTS; i++) {
5654 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5655 if (!intel_dig_port)
5656 continue;
5657
5658 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5659 if (!intel_dig_port->dp.can_mst)
5660 continue;
5661 if (intel_dig_port->dp.is_mst)
5662 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5663 }
5664 }
5665}
5666
5667void intel_dp_mst_resume(struct drm_device *dev)
5668{
5669 struct drm_i915_private *dev_priv = dev->dev_private;
5670 int i;
5671
5672 for (i = 0; i < I915_MAX_PORTS; i++) {
5673 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5674 if (!intel_dig_port)
5675 continue;
5676 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5677 int ret;
5678
5679 if (!intel_dig_port->dp.can_mst)
5680 continue;
5681
5682 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5683 if (ret != 0) {
5684 intel_dp_check_mst_status(&intel_dig_port->dp);
5685 }
5686 }
5687 }
5688}