drivers/gpu/drm/i915/intel_dp.c
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
71 * CHV supports eDP 1.4, which has more link rates.
72 * Only the fixed rates are listed below; variable rates are excluded.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires programming a fractional divider for m2.
77 * m2 is stored in fixed-point format using the formula below:
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
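/*
 * Illustrative sketch of the fixed-point packing described above: m2_int
 * sits above bit 22 and m2_fraction below it, so the 1.62 GHz entry packs
 * (32 << 22) | 1677722 == 0x0819999a. A hypothetical helper (not part of
 * this table) would be:
 *
 *	static inline uint32_t chv_pack_m2(uint32_t m2_int, uint32_t m2_fraction)
 *	{
 *		return (m2_int << 22) | m2_fraction;
 *	}
 */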
a8f3ef61 87/* Skylake supports the following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
fe51bfb9
VS
90static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
f4896f15 93static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 94
cfcb0fc9
JB
95/**
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
98 *
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
101 */
102static bool is_edp(struct intel_dp *intel_dp)
103{
da63a9f2
PZ
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
107}
108
68b4d824 109static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 110{
68b4d824
ID
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
114}
115
df0e9248
CW
116static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117{
fa90ecef 118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
119}
120
ea5b213a 121static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 122static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 123static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 124static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
125static void vlv_steal_power_sequencer(struct drm_device *dev,
126 enum pipe pipe);
a4fc5ed6 127
ed4e9c1d
VS
128static int
129intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 130{
7183dc29 131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
132
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_2_7:
1db10e28 136 case DP_LINK_BW_5_4:
d4eead50 137 break;
a4fc5ed6 138 default:
d4eead50
ID
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140 max_link_bw);
a4fc5ed6
KP
141 max_link_bw = DP_LINK_BW_1_62;
142 break;
143 }
144 return max_link_bw;
145}
146
eeb6324d
PZ
147static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148{
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
152
153 source_max = 4;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156 source_max = 2;
157
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160 return min(source_max, sink_max);
161}
162
cd9dde44
AJ
163/*
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
166 *
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168 *
169 * 270000 * 1 * 8 / 10 == 216000
170 *
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
175 *
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
178 */
179
a4fc5ed6 180static int
c898261c 181intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 182{
cd9dde44 183 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
184}
185
fe27d53e
DA
186static int
187intel_dp_max_data_rate(int max_link_clock, int max_lanes)
188{
189 return (max_link_clock * max_lanes * 8) / 10;
190}
191
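/*
 * Worked example tying the two helpers above together, using the numbers
 * from the units comment: a 1680x1050R mode at 119000 kHz and 18 bpp needs
 * intel_dp_link_required(119000, 18) == 214200 decakilobits, while a single
 * lane at 2.7 GHz supplies intel_dp_max_data_rate(270000, 1) == 216000,
 * so that mode just fits on one lane.
 */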
c19de8eb 192static enum drm_mode_status
a4fc5ed6
KP
193intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195{
df0e9248 196 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 201
dd06f90e
JN
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
204 return MODE_PANEL;
205
dd06f90e 206 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 207 return MODE_PANEL;
03afc4a2
DV
208
209 target_clock = fixed_mode->clock;
7de56f43
ZY
210 }
211
50fec21a 212 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 213 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
214
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
217
218 if (mode_rate > max_rate)
c4867936 219 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
220
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
223
0af78a2b
DV
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
226
a4fc5ed6
KP
227 return MODE_OK;
228}
229
a4f1289e 230uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
231{
232 int i;
233 uint32_t v = 0;
234
235 if (src_bytes > 4)
236 src_bytes = 4;
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239 return v;
240}
241
c2af70e2 242static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
243{
244 int i;
245 if (dst_bytes > 4)
246 dst_bytes = 4;
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
249}
250
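/*
 * Illustrative round trip for the two packing helpers above: packing the
 * bytes { 0x12, 0x34 } gives intel_dp_pack_aux() == 0x12340000, and
 * intel_dp_unpack_aux(0x12340000, dst, 2) writes 0x12 and 0x34 back into
 * dst[0] and dst[1] (the most significant byte always maps to index 0).
 */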
fb0f8fbf
KP
251/* hrawclock is 1/4 the FSB frequency */
252static int
253intel_hrawclk(struct drm_device *dev)
254{
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 uint32_t clkcfg;
257
9473c8f4
VP
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
260 return 200;
261
fb0f8fbf
KP
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
264 case CLKCFG_FSB_400:
265 return 100;
266 case CLKCFG_FSB_533:
267 return 133;
268 case CLKCFG_FSB_667:
269 return 166;
270 case CLKCFG_FSB_800:
271 return 200;
272 case CLKCFG_FSB_1067:
273 return 266;
274 case CLKCFG_FSB_1333:
275 return 333;
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
279 return 400;
280 default:
281 return 133;
282 }
283}
284
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b
JN
288static void
289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 290 struct intel_dp *intel_dp);
bf13e81b 291
773538e8
VS
292static void pps_lock(struct intel_dp *intel_dp)
293{
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
299
300 /*
301 * See vlv_power_sequencer_reset() for why we need
302 * a power domain reference here.
303 */
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
306
307 mutex_lock(&dev_priv->pps_mutex);
308}
309
310static void pps_unlock(struct intel_dp *intel_dp)
311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
317
318 mutex_unlock(&dev_priv->pps_mutex);
319
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
322}
323
961a0db0
VS
324static void
325vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326{
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 331 bool pll_enabled;
961a0db0
VS
332 uint32_t DP;
333
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power sequencer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
337 return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
d288f65f
VS
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357 /*
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable it temporarily if it's not already enabled.
360 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
961a0db0
VS
365 /*
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make this power sequencer lock onto the port.
369 * Otherwise even the VDD force bit won't work.
370 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
961a0db0
VS
382}
383
bf13e81b
JN
384static enum pipe
385vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
386{
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 392 enum pipe pipe;
bf13e81b 393
e39b999a 394 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 395
a8c3344e
VS
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
a4a5d2f8
VS
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
401
402 /*
403 * We don't have a power sequencer currently.
404 * Pick one that's not used by other ports.
405 */
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
407 base.head) {
408 struct intel_dp *tmp;
409
410 if (encoder->type != INTEL_OUTPUT_EDP)
411 continue;
412
413 tmp = enc_to_intel_dp(&encoder->base);
414
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
417 }
418
419 /*
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
422 */
423 if (WARN_ON(pipes == 0))
a8c3344e
VS
424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
a4a5d2f8 427
a8c3344e
VS
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
430
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
434
435 /* init power sequencer on this pipe and port */
36b5f425
VS
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 438
961a0db0
VS
439 /*
440 * Even VDD force doesn't work until we've made
441 * the power sequencer lock onto the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
444
445 return intel_dp->pps_pipe;
446}
447
6491ab27
VS
448typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
449 enum pipe pipe);
450
451static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
452 enum pipe pipe)
453{
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
455}
456
457static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
461}
462
463static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return true;
467}
bf13e81b 468
a4a5d2f8 469static enum pipe
6491ab27
VS
470vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471 enum port port,
472 vlv_pipe_check pipe_check)
a4a5d2f8
VS
473{
474 enum pipe pipe;
bf13e81b 475
bf13e81b
JN
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
479
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481 continue;
482
6491ab27
VS
483 if (!pipe_check(dev_priv, pipe))
484 continue;
485
a4a5d2f8 486 return pipe;
bf13e81b
JN
487 }
488
a4a5d2f8
VS
489 return INVALID_PIPE;
490}
491
492static void
493vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
494{
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
498 enum port port = intel_dig_port->port;
499
500 lockdep_assert_held(&dev_priv->pps_mutex);
501
502 /* try to find a pipe with this port selected */
6491ab27
VS
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
505 vlv_pipe_has_pp_on);
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_any);
a4a5d2f8
VS
514
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
518 port_name(port));
519 return;
bf13e81b
JN
520 }
521
a4a5d2f8
VS
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
524
36b5f425
VS
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
527}
528
773538e8
VS
529void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
530{
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
533
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
535 return;
536
537 /*
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so they
544 * should always be used.
545 */
546
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
549
550 if (encoder->type != INTEL_OUTPUT_EDP)
551 continue;
552
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
555 }
bf13e81b
JN
556}
557
558static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559{
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
564 else
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566}
567
568static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569{
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
574 else
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576}
577
01527b31
CT
578/* Reboot notifier handler to shut down panel power, guaranteeing T12 timing.
579 This function is only applicable when the panel PM state is not tracked. */
580static int edp_notify_handler(struct notifier_block *this, unsigned long code,
581 void *unused)
582{
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
584 edp_notifier);
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 u32 pp_div;
588 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
589
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
591 return 0;
592
773538e8 593 pps_lock(intel_dp);
e39b999a 594
01527b31 595 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
597
01527b31
CT
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
602
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
607 }
608
773538e8 609 pps_unlock(intel_dp);
e39b999a 610
01527b31
CT
611 return 0;
612}
613
4be73780 614static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 615{
30add22d 616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
e39b999a
VS
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
9a42356b
VS
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
bf13e81b 625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
626}
627
4be73780 628static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 629{
30add22d 630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
631 struct drm_i915_private *dev_priv = dev->dev_private;
632
e39b999a
VS
633 lockdep_assert_held(&dev_priv->pps_mutex);
634
9a42356b
VS
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
773538e8 639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
640}
641
9b984dae
KP
642static void
643intel_dp_check_edp(struct intel_dp *intel_dp)
644{
30add22d 645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 646 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 647
9b984dae
KP
648 if (!is_edp(intel_dp))
649 return;
453c5420 650
4be73780 651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
656 }
657}
658
9ee32fea
DV
659static uint32_t
660intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
661{
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
666 uint32_t status;
667 bool done;
668
ef04f00d 669#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 670 if (has_aux_irq)
b18ac466 671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 672 msecs_to_jiffies_timeout(10));
9ee32fea
DV
673 else
674 done = wait_for_atomic(C, 10) == 0;
675 if (!done)
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
677 has_aux_irq);
678#undef C
679
680 return status;
681}
682
ec5b01dd 683static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 684{
174edf1f
PZ
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 687
ec5b01dd
DL
688 /*
689 * The clock divider is based on hrawclk and should run at
690 * 2MHz. So take the hrawclk value, divide it by 2, and use that.
a4fc5ed6 691 */
ec5b01dd
DL
692 return index ? 0 : intel_hrawclk(dev) / 2;
693}
694
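/*
 * Example of the divider above: with a 200 MHz hrawclk, index 0 yields
 * 200 / 2 == 100, i.e. the value that divides hrawclk down to the desired
 * 2 MHz AUX clock; compare vlv_get_aux_clock_divider() below, which simply
 * hard-codes 100 for VLV's fixed 200 MHz hrawclk.
 */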
695static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696{
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 699 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
700
701 if (index)
702 return 0;
703
704 if (intel_dig_port->port == PORT_A) {
469d4b2a 705 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
706 } else {
707 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
708 }
709}
710
711static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
712{
713 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
714 struct drm_device *dev = intel_dig_port->base.base.dev;
715 struct drm_i915_private *dev_priv = dev->dev_private;
716
717 if (intel_dig_port->port == PORT_A) {
718 if (index)
719 return 0;
1652d19e 720 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
721 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
722 /* Workaround for non-ULT HSW */
bc86625a
CW
723 switch (index) {
724 case 0: return 63;
725 case 1: return 72;
726 default: return 0;
727 }
ec5b01dd 728 } else {
bc86625a 729 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 730 }
b84a1cf8
RV
731}
732
ec5b01dd
DL
733static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
734{
735 return index ? 0 : 100;
736}
737
b6b5e383
DL
738static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
739{
740 /*
741 * SKL doesn't need us to program the AUX clock divider (Hardware will
742 * derive the clock from CDCLK automatically). We still implement the
743 * get_aux_clock_divider vfunc to plug into the existing code.
744 */
745 return index ? 0 : 1;
746}
747
5ed12a19
DL
748static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
749 bool has_aux_irq,
750 int send_bytes,
751 uint32_t aux_clock_divider)
752{
753 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
754 struct drm_device *dev = intel_dig_port->base.base.dev;
755 uint32_t precharge, timeout;
756
757 if (IS_GEN6(dev))
758 precharge = 3;
759 else
760 precharge = 5;
761
762 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
763 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
764 else
765 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
766
767 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 768 DP_AUX_CH_CTL_DONE |
5ed12a19 769 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 770 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 771 timeout |
788d4433 772 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
773 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
774 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 775 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
776}
777
b9ca5fad
DL
778static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
779 bool has_aux_irq,
780 int send_bytes,
781 uint32_t unused)
782{
783 return DP_AUX_CH_CTL_SEND_BUSY |
784 DP_AUX_CH_CTL_DONE |
785 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
786 DP_AUX_CH_CTL_TIME_OUT_ERROR |
787 DP_AUX_CH_CTL_TIME_OUT_1600us |
788 DP_AUX_CH_CTL_RECEIVE_ERROR |
789 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
790 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
791}
792
b84a1cf8
RV
793static int
794intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 795 const uint8_t *send, int send_bytes,
b84a1cf8
RV
796 uint8_t *recv, int recv_size)
797{
798 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
799 struct drm_device *dev = intel_dig_port->base.base.dev;
800 struct drm_i915_private *dev_priv = dev->dev_private;
801 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
802 uint32_t ch_data = ch_ctl + 4;
bc86625a 803 uint32_t aux_clock_divider;
b84a1cf8
RV
804 int i, ret, recv_bytes;
805 uint32_t status;
5ed12a19 806 int try, clock = 0;
4e6b788c 807 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
808 bool vdd;
809
773538e8 810 pps_lock(intel_dp);
e39b999a 811
72c3500a
VS
812 /*
813 * We will be called with VDD already enabled for dpcd/edid/oui reads.
814 * In such cases we want to leave VDD enabled and it's up to upper layers
815 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
816 * ourselves.
817 */
1e0560e0 818 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
819
820 /* dp aux is extremely sensitive to irq latency, hence request the
821 * lowest possible wakeup latency and so prevent the cpu from going into
822 * deep sleep states.
823 */
824 pm_qos_update_request(&dev_priv->pm_qos, 0);
825
826 intel_dp_check_edp(intel_dp);
5eb08b69 827
c67a470b
PZ
828 intel_aux_display_runtime_get(dev_priv);
829
11bee43e
JB
830 /* Try to wait for any previous AUX channel activity */
831 for (try = 0; try < 3; try++) {
ef04f00d 832 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
833 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
834 break;
835 msleep(1);
836 }
837
838 if (try == 3) {
839 WARN(1, "dp_aux_ch not started status 0x%08x\n",
840 I915_READ(ch_ctl));
9ee32fea
DV
841 ret = -EBUSY;
842 goto out;
4f7f7b7e
CW
843 }
844
46a5ae9f
PZ
845 /* Only 5 data registers! */
846 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
847 ret = -E2BIG;
848 goto out;
849 }
850
ec5b01dd 851 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
852 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
853 has_aux_irq,
854 send_bytes,
855 aux_clock_divider);
5ed12a19 856
bc86625a
CW
857 /* Must try at least 3 times according to DP spec */
858 for (try = 0; try < 5; try++) {
859 /* Load the send data into the aux channel data registers */
860 for (i = 0; i < send_bytes; i += 4)
861 I915_WRITE(ch_data + i,
a4f1289e
RV
862 intel_dp_pack_aux(send + i,
863 send_bytes - i));
bc86625a
CW
864
865 /* Send the command and wait for it to complete */
5ed12a19 866 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
867
868 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
869
870 /* Clear done status and any errors */
871 I915_WRITE(ch_ctl,
872 status |
873 DP_AUX_CH_CTL_DONE |
874 DP_AUX_CH_CTL_TIME_OUT_ERROR |
875 DP_AUX_CH_CTL_RECEIVE_ERROR);
876
877 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
878 DP_AUX_CH_CTL_RECEIVE_ERROR))
879 continue;
880 if (status & DP_AUX_CH_CTL_DONE)
881 break;
882 }
4f7f7b7e 883 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
884 break;
885 }
886
a4fc5ed6 887 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 888 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
889 ret = -EBUSY;
890 goto out;
a4fc5ed6
KP
891 }
892
893 /* Check for timeout or receive error.
894 * Timeouts occur when the sink is not connected
895 */
a5b3da54 896 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 897 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
898 ret = -EIO;
899 goto out;
a5b3da54 900 }
1ae8c0a5
KP
901
902 /* Timeouts occur when the device isn't connected, so they're
903 * "normal" -- don't fill the kernel log with these */
a5b3da54 904 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 905 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
906 ret = -ETIMEDOUT;
907 goto out;
a4fc5ed6
KP
908 }
909
910 /* Unload any bytes sent back from the other side */
911 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
912 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
913 if (recv_bytes > recv_size)
914 recv_bytes = recv_size;
0206e353 915
4f7f7b7e 916 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
917 intel_dp_unpack_aux(I915_READ(ch_data + i),
918 recv + i, recv_bytes - i);
a4fc5ed6 919
9ee32fea
DV
920 ret = recv_bytes;
921out:
922 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 923 intel_aux_display_runtime_put(dev_priv);
9ee32fea 924
884f19e9
JN
925 if (vdd)
926 edp_panel_vdd_off(intel_dp, false);
927
773538e8 928 pps_unlock(intel_dp);
e39b999a 929
9ee32fea 930 return ret;
a4fc5ed6
KP
931}
932
a6c8aff0
JN
933#define BARE_ADDRESS_SIZE 3
934#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
935static ssize_t
936intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 937{
9d1a1031
JN
938 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
939 uint8_t txbuf[20], rxbuf[20];
940 size_t txsize, rxsize;
a4fc5ed6 941 int ret;
a4fc5ed6 942
d2d9cbbd
VS
943 txbuf[0] = (msg->request << 4) |
944 ((msg->address >> 16) & 0xf);
945 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
946 txbuf[2] = msg->address & 0xff;
947 txbuf[3] = msg->size - 1;
46a5ae9f 948
9d1a1031
JN
949 switch (msg->request & ~DP_AUX_I2C_MOT) {
950 case DP_AUX_NATIVE_WRITE:
951 case DP_AUX_I2C_WRITE:
a6c8aff0 952 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 953 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 954
9d1a1031
JN
955 if (WARN_ON(txsize > 20))
956 return -E2BIG;
a4fc5ed6 957
9d1a1031 958 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 959
9d1a1031
JN
960 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
961 if (ret > 0) {
962 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 963
a1ddefd8
JN
964 if (ret > 1) {
965 /* Number of bytes written in a short write. */
966 ret = clamp_t(int, rxbuf[1], 0, msg->size);
967 } else {
968 /* Return payload size. */
969 ret = msg->size;
970 }
9d1a1031
JN
971 }
972 break;
46a5ae9f 973
9d1a1031
JN
974 case DP_AUX_NATIVE_READ:
975 case DP_AUX_I2C_READ:
a6c8aff0 976 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 977 rxsize = msg->size + 1;
a4fc5ed6 978
9d1a1031
JN
979 if (WARN_ON(rxsize > 20))
980 return -E2BIG;
a4fc5ed6 981
9d1a1031
JN
982 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
983 if (ret > 0) {
984 msg->reply = rxbuf[0] >> 4;
985 /*
986 * Assume happy day, and copy the data. The caller is
987 * expected to check msg->reply before touching it.
988 *
989 * Return payload size.
990 */
991 ret--;
992 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 993 }
9d1a1031
JN
994 break;
995
996 default:
997 ret = -EINVAL;
998 break;
a4fc5ed6 999 }
f51a44b9 1000
9d1a1031 1001 return ret;
a4fc5ed6
KP
1002}
1003
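/*
 * Sketch of the header packing above for a native AUX read of one byte at
 * DPCD address 0x000 (request codes per the DisplayPort spec, assuming
 * DP_AUX_NATIVE_READ == 0x9): txbuf[] becomes { 0x90, 0x00, 0x00, 0x00 },
 * i.e. the request in the high nibble of byte 0, the 20-bit address across
 * the low nibble and bytes 1-2, and "length - 1" in byte 3.
 */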
9d1a1031
JN
1004static void
1005intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1006{
1007 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1008 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1009 enum port port = intel_dig_port->port;
0b99836f 1010 const char *name = NULL;
ab2c0672
DA
1011 int ret;
1012
33ad6626
JN
1013 switch (port) {
1014 case PORT_A:
1015 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1016 name = "DPDDC-A";
ab2c0672 1017 break;
33ad6626
JN
1018 case PORT_B:
1019 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1020 name = "DPDDC-B";
ab2c0672 1021 break;
33ad6626
JN
1022 case PORT_C:
1023 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1024 name = "DPDDC-C";
ab2c0672 1025 break;
33ad6626
JN
1026 case PORT_D:
1027 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1028 name = "DPDDC-D";
33ad6626
JN
1029 break;
1030 default:
1031 BUG();
ab2c0672
DA
1032 }
1033
1b1aad75
DL
1034 /*
1035 * The AUX_CTL register is usually DP_CTL + 0x10.
1036 *
1037 * On Haswell and Broadwell though:
1038 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1039 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1040 *
1041 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1042 */
1043 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1044 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1045
0b99836f 1046 intel_dp->aux.name = name;
9d1a1031
JN
1047 intel_dp->aux.dev = dev->dev;
1048 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1049
0b99836f
JN
1050 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1051 connector->base.kdev->kobj.name);
8316f337 1052
4f71d0cb 1053 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1054 if (ret < 0) {
4f71d0cb 1055 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1056 name, ret);
1057 return;
ab2c0672 1058 }
8a5e6aeb 1059
0b99836f
JN
1060 ret = sysfs_create_link(&connector->base.kdev->kobj,
1061 &intel_dp->aux.ddc.dev.kobj,
1062 intel_dp->aux.ddc.dev.kobj.name);
1063 if (ret < 0) {
1064 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1065 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1066 }
a4fc5ed6
KP
1067}
1068
80f65de3
ID
1069static void
1070intel_dp_connector_unregister(struct intel_connector *intel_connector)
1071{
1072 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1073
0e32b39c
DA
1074 if (!intel_connector->mst_port)
1075 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1076 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1077 intel_connector_unregister(intel_connector);
1078}
1079
5416d871 1080static void
c3346ef6 1081skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1082{
1083 u32 ctrl1;
1084
1085 pipe_config->ddi_pll_sel = SKL_DPLL0;
1086 pipe_config->dpll_hw_state.cfgcr1 = 0;
1087 pipe_config->dpll_hw_state.cfgcr2 = 0;
1088
1089 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1090 switch (link_clock / 2) {
1091 case 81000:
5416d871
DL
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1093 SKL_DPLL0);
1094 break;
c3346ef6 1095 case 135000:
5416d871
DL
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1097 SKL_DPLL0);
1098 break;
c3346ef6 1099 case 270000:
5416d871
DL
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1101 SKL_DPLL0);
1102 break;
c3346ef6
SJ
1103 case 162000:
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1105 SKL_DPLL0);
1106 break;
1107 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1108 * results in a CDCLK change. Need to handle the change of CDCLK by
1109 * disabling pipes and re-enabling them. */
1110 case 108000:
1111 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1112 SKL_DPLL0);
1113 break;
1114 case 216000:
1115 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1116 SKL_DPLL0);
1117 break;
1118
5416d871
DL
1119 }
1120 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1121}
1122
0e50338c 1123static void
5cec258b 1124hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1125{
1126 switch (link_bw) {
1127 case DP_LINK_BW_1_62:
1128 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1129 break;
1130 case DP_LINK_BW_2_7:
1131 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1132 break;
1133 case DP_LINK_BW_5_4:
1134 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1135 break;
1136 }
1137}
1138
fc0f8e25 1139static int
12f6a2e2 1140intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1141{
94ca719e
VS
1142 if (intel_dp->num_sink_rates) {
1143 *sink_rates = intel_dp->sink_rates;
1144 return intel_dp->num_sink_rates;
fc0f8e25 1145 }
12f6a2e2
VS
1146
1147 *sink_rates = default_rates;
1148
1149 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1150}
1151
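/*
 * Example of the fallback above: the DPCD link-bw codes are 0x06, 0x0a and
 * 0x14 for 1.62, 2.7 and 5.4 GHz, so (code >> 3) + 1 gives 1, 2 or 3 -
 * exactly the number of leading entries of default_rates[] the sink can use.
 */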
a8f3ef61 1152static int
1db10e28 1153intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1154{
636280ba
VS
1155 if (INTEL_INFO(dev)->gen >= 9) {
1156 *source_rates = gen9_rates;
1157 return ARRAY_SIZE(gen9_rates);
fe51bfb9
VS
1158 } else if (IS_CHERRYVIEW(dev)) {
1159 *source_rates = chv_rates;
1160 return ARRAY_SIZE(chv_rates);
a8f3ef61 1161 }
636280ba
VS
1162
1163 *source_rates = default_rates;
1164
1db10e28
VS
1165 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1166 /* WaDisableHBR2:skl */
1167 return (DP_LINK_BW_2_7 >> 3) + 1;
1168 else if (INTEL_INFO(dev)->gen >= 8 ||
1169 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1170 return (DP_LINK_BW_5_4 >> 3) + 1;
1171 else
1172 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1173}
1174
c6bb3538
DV
1175static void
1176intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1177 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1178{
1179 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1180 const struct dp_link_dpll *divisor = NULL;
1181 int i, count = 0;
c6bb3538
DV
1182
1183 if (IS_G4X(dev)) {
9dd4ffdf
CML
1184 divisor = gen4_dpll;
1185 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1186 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1187 divisor = pch_dpll;
1188 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1189 } else if (IS_CHERRYVIEW(dev)) {
1190 divisor = chv_dpll;
1191 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1192 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1193 divisor = vlv_dpll;
1194 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1195 }
9dd4ffdf
CML
1196
1197 if (divisor && count) {
1198 for (i = 0; i < count; i++) {
1199 if (link_bw == divisor[i].link_bw) {
1200 pipe_config->dpll = divisor[i].dpll;
1201 pipe_config->clock_set = true;
1202 break;
1203 }
1204 }
c6bb3538
DV
1205 }
1206}
1207
2ecae76a
VS
1208static int intersect_rates(const int *source_rates, int source_len,
1209 const int *sink_rates, int sink_len,
94ca719e 1210 int *common_rates)
a8f3ef61
SJ
1211{
1212 int i = 0, j = 0, k = 0;
1213
a8f3ef61
SJ
1214 while (i < source_len && j < sink_len) {
1215 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1216 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1217 return k;
94ca719e 1218 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1219 ++k;
1220 ++i;
1221 ++j;
1222 } else if (source_rates[i] < sink_rates[j]) {
1223 ++i;
1224 } else {
1225 ++j;
1226 }
1227 }
1228 return k;
1229}
1230
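/*
 * Illustrative run of the merge above: intersecting the source list
 * { 162000, 270000, 540000 } with a sink list { 162000, 270000 } walks both
 * sorted arrays once and returns k == 2 with
 * common_rates[] = { 162000, 270000 }.
 */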
94ca719e
VS
1231static int intel_dp_common_rates(struct intel_dp *intel_dp,
1232 int *common_rates)
2ecae76a
VS
1233{
1234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1235 const int *source_rates, *sink_rates;
1236 int source_len, sink_len;
1237
1238 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1239 source_len = intel_dp_source_rates(dev, &source_rates);
1240
1241 return intersect_rates(source_rates, source_len,
1242 sink_rates, sink_len,
94ca719e 1243 common_rates);
2ecae76a
VS
1244}
1245
0336400e
VS
1246static void snprintf_int_array(char *str, size_t len,
1247 const int *array, int nelem)
1248{
1249 int i;
1250
1251 str[0] = '\0';
1252
1253 for (i = 0; i < nelem; i++) {
1254 int r = snprintf(str, len, "%d,", array[i]);
1255 if (r >= len)
1256 return;
1257 str += r;
1258 len -= r;
1259 }
1260}
1261
1262static void intel_dp_print_rates(struct intel_dp *intel_dp)
1263{
1264 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1265 const int *source_rates, *sink_rates;
94ca719e
VS
1266 int source_len, sink_len, common_len;
1267 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1268 char str[128]; /* FIXME: too big for stack? */
1269
1270 if ((drm_debug & DRM_UT_KMS) == 0)
1271 return;
1272
1273 source_len = intel_dp_source_rates(dev, &source_rates);
1274 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1275 DRM_DEBUG_KMS("source rates: %s\n", str);
1276
1277 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1278 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1279 DRM_DEBUG_KMS("sink rates: %s\n", str);
1280
94ca719e
VS
1281 common_len = intel_dp_common_rates(intel_dp, common_rates);
1282 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1283 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1284}
1285
f4896f15 1286static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1287{
1288 int i = 0;
1289
1290 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1291 if (find == rates[i])
1292 break;
1293
1294 return i;
1295}
1296
50fec21a
VS
1297int
1298intel_dp_max_link_rate(struct intel_dp *intel_dp)
1299{
1300 int rates[DP_MAX_SUPPORTED_RATES] = {};
1301 int len;
1302
94ca719e 1303 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1304 if (WARN_ON(len <= 0))
1305 return 162000;
1306
1307 return rates[rate_to_index(0, rates) - 1];
1308}
1309
ed4e9c1d
VS
1310int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1311{
94ca719e 1312 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1313}
1314
00c09d70 1315bool
5bfe2ac0 1316intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1317 struct intel_crtc_state *pipe_config)
a4fc5ed6 1318{
5bfe2ac0 1319 struct drm_device *dev = encoder->base.dev;
36008365 1320 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1321 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1323 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1324 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1325 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1326 int lane_count, clock;
56071a20 1327 int min_lane_count = 1;
eeb6324d 1328 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1329 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1330 int min_clock = 0;
a8f3ef61 1331 int max_clock;
083f9560 1332 int bpp, mode_rate;
ff9a6750 1333 int link_avail, link_clock;
94ca719e
VS
1334 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1335 int common_len;
a8f3ef61 1336
94ca719e 1337 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1338
1339 /* No common link rates between source and sink */
94ca719e 1340 WARN_ON(common_len <= 0);
a8f3ef61 1341
94ca719e 1342 max_clock = common_len - 1;
a4fc5ed6 1343
bc7d38a4 1344 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1345 pipe_config->has_pch_encoder = true;
1346
03afc4a2 1347 pipe_config->has_dp_encoder = true;
f769cd24 1348 pipe_config->has_drrs = false;
9ed109a7 1349 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1350
dd06f90e
JN
1351 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1352 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1353 adjusted_mode);
2dd24552
JB
1354 if (!HAS_PCH_SPLIT(dev))
1355 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1356 intel_connector->panel.fitting_mode);
1357 else
b074cec8
JB
1358 intel_pch_panel_fitting(intel_crtc, pipe_config,
1359 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1360 }
1361
cb1793ce 1362 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1363 return false;
1364
083f9560 1365 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1366 "max bw %d pixel clock %iKHz\n",
94ca719e 1367 max_lane_count, common_rates[max_clock],
241bfc38 1368 adjusted_mode->crtc_clock);
083f9560 1369
36008365
DV
1370 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1371 * bpc in between. */
3e7ca985 1372 bpp = pipe_config->pipe_bpp;
56071a20
JN
1373 if (is_edp(intel_dp)) {
1374 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1375 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1376 dev_priv->vbt.edp_bpp);
1377 bpp = dev_priv->vbt.edp_bpp;
1378 }
1379
344c5bbc
JN
1380 /*
1381 * Use the maximum clock and number of lanes the eDP panel
1382 * advertises being capable of. The panels are generally
1383 * designed to support only a single clock and lane
1384 * configuration, and typically these values correspond to the
1385 * native resolution of the panel.
1386 */
1387 min_lane_count = max_lane_count;
1388 min_clock = max_clock;
7984211e 1389 }
657445fe 1390
36008365 1391 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1392 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1393 bpp);
36008365 1394
c6930992 1395 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1396 for (lane_count = min_lane_count;
1397 lane_count <= max_lane_count;
1398 lane_count <<= 1) {
1399
94ca719e 1400 link_clock = common_rates[clock];
36008365
DV
1401 link_avail = intel_dp_max_data_rate(link_clock,
1402 lane_count);
1403
1404 if (mode_rate <= link_avail) {
1405 goto found;
1406 }
1407 }
1408 }
1409 }
c4867936 1410
36008365 1411 return false;
3685a8f3 1412
36008365 1413found:
55bc60db
VS
1414 if (intel_dp->color_range_auto) {
1415 /*
1416 * See:
1417 * CEA-861-E - 5.1 Default Encoding Parameters
1418 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1419 */
18316c8c 1420 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1421 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1422 else
1423 intel_dp->color_range = 0;
1424 }
1425
3685a8f3 1426 if (intel_dp->color_range)
50f3b016 1427 pipe_config->limited_color_range = true;
a4fc5ed6 1428
36008365 1429 intel_dp->lane_count = lane_count;
a8f3ef61 1430
94ca719e 1431 if (intel_dp->num_sink_rates) {
bc27b7d3 1432 intel_dp->link_bw = 0;
a8f3ef61 1433 intel_dp->rate_select =
94ca719e 1434 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1435 } else {
1436 intel_dp->link_bw =
94ca719e 1437 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1438 intel_dp->rate_select = 0;
a8f3ef61
SJ
1439 }
1440
657445fe 1441 pipe_config->pipe_bpp = bpp;
94ca719e 1442 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1443
36008365
DV
1444 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1445 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1446 pipe_config->port_clock, bpp);
36008365
DV
1447 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1448 mode_rate, link_avail);
a4fc5ed6 1449
03afc4a2 1450 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1451 adjusted_mode->crtc_clock,
1452 pipe_config->port_clock,
03afc4a2 1453 &pipe_config->dp_m_n);
9d1a455b 1454
439d7ac0 1455 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1456 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1457 pipe_config->has_drrs = true;
439d7ac0
PB
1458 intel_link_compute_m_n(bpp, lane_count,
1459 intel_connector->panel.downclock_mode->clock,
1460 pipe_config->port_clock,
1461 &pipe_config->dp_m2_n2);
1462 }
1463
5416d871 1464 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1465 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
5416d871 1466 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1467 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1468 else
1469 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1470
03afc4a2 1471 return true;
a4fc5ed6
KP
1472}
1473
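/*
 * Worked example of the bpp/clock/lane search above, assuming a non-eDP
 * sink whose common rates are default_rates[], a four-lane port and a
 * pipe_bpp of 24: a 148500 kHz mode needs
 * intel_dp_link_required(148500, 24) == 356400. At the lowest common rate
 * the loop tries 1, 2 and then 4 lanes (129600, 259200, 518400 available),
 * so the first fit is 4 lanes at 162000.
 */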
7c62a164 1474static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1475{
7c62a164
DV
1476 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1477 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1478 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1479 struct drm_i915_private *dev_priv = dev->dev_private;
1480 u32 dpa_ctl;
1481
6e3c9717
ACO
1482 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1483 crtc->config->port_clock);
ea9b6006
DV
1484 dpa_ctl = I915_READ(DP_A);
1485 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1486
6e3c9717 1487 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1488 /* For a long time we've carried around an ILK-DevA w/a for the
1489 * 160MHz clock. If we're really unlucky, it's still required.
1490 */
1491 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1492 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1493 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1494 } else {
1495 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1496 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1497 }
1ce17038 1498
ea9b6006
DV
1499 I915_WRITE(DP_A, dpa_ctl);
1500
1501 POSTING_READ(DP_A);
1502 udelay(500);
1503}
1504
8ac33ed3 1505static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1506{
b934223d 1507 struct drm_device *dev = encoder->base.dev;
417e822d 1508 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1510 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1511 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1512 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1513
417e822d 1514 /*
1a2eb460 1515 * There are four kinds of DP registers:
417e822d
KP
1516 *
1517 * IBX PCH
1a2eb460
KP
1518 * SNB CPU
1519 * IVB CPU
417e822d
KP
1520 * CPT PCH
1521 *
1522 * IBX PCH and CPU are the same for almost everything,
1523 * except that the CPU DP PLL is configured in this
1524 * register
1525 *
1526 * CPT PCH is quite different, having many bits moved
1527 * to the TRANS_DP_CTL register instead. That
1528 * configuration happens (oddly) in ironlake_pch_enable
1529 */
9c9e7927 1530
417e822d
KP
1531 /* Preserve the BIOS-computed detected bit. This is
1532 * supposed to be read-only.
1533 */
1534 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1535
417e822d 1536 /* Handle DP bits in common between all three register formats */
417e822d 1537 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1538 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1539
6e3c9717 1540 if (crtc->config->has_audio)
ea5b213a 1541 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1542
417e822d 1543 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1544
bc7d38a4 1545 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1546 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1547 intel_dp->DP |= DP_SYNC_HS_HIGH;
1548 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1549 intel_dp->DP |= DP_SYNC_VS_HIGH;
1550 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1551
6aba5b6c 1552 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1553 intel_dp->DP |= DP_ENHANCED_FRAMING;
1554
7c62a164 1555 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1556 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1557 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1558 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1559
1560 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1561 intel_dp->DP |= DP_SYNC_HS_HIGH;
1562 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1563 intel_dp->DP |= DP_SYNC_VS_HIGH;
1564 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1565
6aba5b6c 1566 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1567 intel_dp->DP |= DP_ENHANCED_FRAMING;
1568
44f37d1f
CML
1569 if (!IS_CHERRYVIEW(dev)) {
1570 if (crtc->pipe == 1)
1571 intel_dp->DP |= DP_PIPEB_SELECT;
1572 } else {
1573 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1574 }
417e822d
KP
1575 } else {
1576 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1577 }
a4fc5ed6
KP
1578}
1579
ffd6749d
PZ
1580#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1581#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1582
1a5ef5b7
PZ
1583#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1584#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1585
ffd6749d
PZ
1586#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1587#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1588
4be73780 1589static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1590 u32 mask,
1591 u32 value)
bd943159 1592{
30add22d 1593 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1594 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1595 u32 pp_stat_reg, pp_ctrl_reg;
1596
e39b999a
VS
1597 lockdep_assert_held(&dev_priv->pps_mutex);
1598
bf13e81b
JN
1599 pp_stat_reg = _pp_stat_reg(intel_dp);
1600 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1601
99ea7127 1602 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1603 mask, value,
1604 I915_READ(pp_stat_reg),
1605 I915_READ(pp_ctrl_reg));
32ce697c 1606
453c5420 1607 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1608 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1609 I915_READ(pp_stat_reg),
1610 I915_READ(pp_ctrl_reg));
32ce697c 1611 }
54c136d4
CW
1612
1613 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1614}
32ce697c 1615
4be73780 1616static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1617{
1618 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1619 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1620}
1621
4be73780 1622static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1623{
1624 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1625 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1626}
1627
4be73780 1628static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1629{
1630 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1631
1632 /* When we disable the VDD override bit last we have to do the manual
1633 * wait. */
1634 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1635 intel_dp->panel_power_cycle_delay);
1636
4be73780 1637 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1638}
1639
4be73780 1640static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1641{
1642 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1643 intel_dp->backlight_on_delay);
1644}
1645
4be73780 1646static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1647{
1648 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1649 intel_dp->backlight_off_delay);
1650}
99ea7127 1651
832dd3c1
KP
1652/* Read the current pp_control value, unlocking the register if it
1653 * is locked
1654 */
1655
453c5420 1656static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1657{
453c5420
JB
1658 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1659 struct drm_i915_private *dev_priv = dev->dev_private;
1660 u32 control;
832dd3c1 1661
e39b999a
VS
1662 lockdep_assert_held(&dev_priv->pps_mutex);
1663
bf13e81b 1664 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1665 control &= ~PANEL_UNLOCK_MASK;
1666 control |= PANEL_UNLOCK_REGS;
1667 return control;
bd943159
KP
1668}
1669
951468f3
VS
1670/*
1671 * Must be paired with edp_panel_vdd_off().
1672 * Must hold pps_mutex around the whole on/off sequence.
1673 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1674 */
1e0560e0 1675static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1676{
30add22d 1677 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1678 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1679 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1680 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1681 enum intel_display_power_domain power_domain;
5d613501 1682 u32 pp;
453c5420 1683 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1684 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1685
e39b999a
VS
1686 lockdep_assert_held(&dev_priv->pps_mutex);
1687
97af61f5 1688 if (!is_edp(intel_dp))
adddaaf4 1689 return false;
bd943159 1690
2c623c11 1691 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1692 intel_dp->want_panel_vdd = true;
99ea7127 1693
4be73780 1694 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1695 return need_to_disable;
b0665d57 1696
4e6e1a54
ID
1697 power_domain = intel_display_port_power_domain(intel_encoder);
1698 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1699
3936fcf4
VS
1700 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1701 port_name(intel_dig_port->port));
bd943159 1702
4be73780
DV
1703 if (!edp_have_panel_power(intel_dp))
1704 wait_panel_power_cycle(intel_dp);
99ea7127 1705
453c5420 1706 pp = ironlake_get_pp_control(intel_dp);
5d613501 1707 pp |= EDP_FORCE_VDD;
ebf33b18 1708
bf13e81b
JN
1709 pp_stat_reg = _pp_stat_reg(intel_dp);
1710 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1711
1712 I915_WRITE(pp_ctrl_reg, pp);
1713 POSTING_READ(pp_ctrl_reg);
1714 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1715 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1716 /*
1717 * If the panel wasn't on, delay before accessing aux channel
1718 */
4be73780 1719 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1720 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1721 port_name(intel_dig_port->port));
f01eca2e 1722 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1723 }
adddaaf4
JN
1724
1725 return need_to_disable;
1726}
1727
951468f3
VS
1728/*
1729 * Must be paired with intel_edp_panel_vdd_off() or
1730 * intel_edp_panel_off().
1731 * Nested calls to these functions are not allowed since
1732 * we drop the lock. Caller must use some higher level
1733 * locking to prevent nested calls from other threads.
1734 */
b80d6c78 1735void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1736{
c695b6b6 1737 bool vdd;
adddaaf4 1738
c695b6b6
VS
1739 if (!is_edp(intel_dp))
1740 return;
1741
773538e8 1742 pps_lock(intel_dp);
c695b6b6 1743 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1744 pps_unlock(intel_dp);
c695b6b6 1745
e2c719b7 1746 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1747 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1748}
1749
4be73780 1750static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1751{
30add22d 1752 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1753 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1754 struct intel_digital_port *intel_dig_port =
1755 dp_to_dig_port(intel_dp);
1756 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1757 enum intel_display_power_domain power_domain;
5d613501 1758 u32 pp;
453c5420 1759 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1760
e39b999a 1761 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1762
15e899a0 1763 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1764
15e899a0 1765 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1766 return;
b0665d57 1767
3936fcf4
VS
1768 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1769 port_name(intel_dig_port->port));
bd943159 1770
be2c9196
VS
1771 pp = ironlake_get_pp_control(intel_dp);
1772 pp &= ~EDP_FORCE_VDD;
453c5420 1773
be2c9196
VS
1774 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1775 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1776
be2c9196
VS
1777 I915_WRITE(pp_ctrl_reg, pp);
1778 POSTING_READ(pp_ctrl_reg);
90791a5c 1779
be2c9196
VS
1780 /* Make sure sequencer is idle before allowing subsequent activity */
1781 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1782 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1783
be2c9196
VS
1784 if ((pp & POWER_TARGET_ON) == 0)
1785 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1786
be2c9196
VS
1787 power_domain = intel_display_port_power_domain(intel_encoder);
1788 intel_display_power_put(dev_priv, power_domain);
bd943159 1789}
5d613501 1790
4be73780 1791static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1792{
1793 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1794 struct intel_dp, panel_vdd_work);
bd943159 1795
773538e8 1796 pps_lock(intel_dp);
15e899a0
VS
1797 if (!intel_dp->want_panel_vdd)
1798 edp_panel_vdd_off_sync(intel_dp);
773538e8 1799 pps_unlock(intel_dp);
bd943159
KP
1800}
1801
aba86890
ID
1802static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1803{
1804 unsigned long delay;
1805
1806 /*
1807 * Queue the timer to fire a long time from now (relative to the power
1808 * down delay) to keep the panel power up across a sequence of
1809 * operations.
1810 */
1811 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1812 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1813}
1814
951468f3
VS
1815/*
1816 * Must be paired with edp_panel_vdd_on().
1817 * Must hold pps_mutex around the whole on/off sequence.
1818 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1819 */
4be73780 1820static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1821{
e39b999a
VS
1822 struct drm_i915_private *dev_priv =
1823 intel_dp_to_dev(intel_dp)->dev_private;
1824
1825 lockdep_assert_held(&dev_priv->pps_mutex);
1826
97af61f5
KP
1827 if (!is_edp(intel_dp))
1828 return;
5d613501 1829
e2c719b7 1830 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1831 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1832
bd943159
KP
1833 intel_dp->want_panel_vdd = false;
1834
aba86890 1835 if (sync)
4be73780 1836 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1837 else
1838 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1839}
1840
9f0fb5be 1841static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1842{
30add22d 1843 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1844 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1845 u32 pp;
453c5420 1846 u32 pp_ctrl_reg;
9934c132 1847
9f0fb5be
VS
1848 lockdep_assert_held(&dev_priv->pps_mutex);
1849
97af61f5 1850 if (!is_edp(intel_dp))
bd943159 1851 return;
99ea7127 1852
3936fcf4
VS
1853 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1854 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1855
e7a89ace
VS
1856 if (WARN(edp_have_panel_power(intel_dp),
1857 "eDP port %c panel power already on\n",
1858 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1859 return;
9934c132 1860
4be73780 1861 wait_panel_power_cycle(intel_dp);
37c6c9b0 1862
bf13e81b 1863 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1864 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1865 if (IS_GEN5(dev)) {
1866 /* ILK workaround: disable reset around power sequence */
1867 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1868 I915_WRITE(pp_ctrl_reg, pp);
1869 POSTING_READ(pp_ctrl_reg);
05ce1a49 1870 }
37c6c9b0 1871
1c0ae80a 1872 pp |= POWER_TARGET_ON;
99ea7127
KP
1873 if (!IS_GEN5(dev))
1874 pp |= PANEL_POWER_RESET;
1875
453c5420
JB
1876 I915_WRITE(pp_ctrl_reg, pp);
1877 POSTING_READ(pp_ctrl_reg);
9934c132 1878
4be73780 1879 wait_panel_on(intel_dp);
dce56b3c 1880 intel_dp->last_power_on = jiffies;
9934c132 1881
05ce1a49
KP
1882 if (IS_GEN5(dev)) {
1883 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1884 I915_WRITE(pp_ctrl_reg, pp);
1885 POSTING_READ(pp_ctrl_reg);
05ce1a49 1886 }
9f0fb5be 1887}
e39b999a 1888
9f0fb5be
VS
1889void intel_edp_panel_on(struct intel_dp *intel_dp)
1890{
1891 if (!is_edp(intel_dp))
1892 return;
1893
1894 pps_lock(intel_dp);
1895 edp_panel_on(intel_dp);
773538e8 1896 pps_unlock(intel_dp);
9934c132
JB
1897}
1898
9f0fb5be
VS
1899
1900static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1901{
4e6e1a54
ID
1902 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1903 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1904 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1905 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1906 enum intel_display_power_domain power_domain;
99ea7127 1907 u32 pp;
453c5420 1908 u32 pp_ctrl_reg;
9934c132 1909
9f0fb5be
VS
1910 lockdep_assert_held(&dev_priv->pps_mutex);
1911
97af61f5
KP
1912 if (!is_edp(intel_dp))
1913 return;
37c6c9b0 1914
3936fcf4
VS
1915 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1916 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1917
3936fcf4
VS
1918 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1919 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1920
453c5420 1921 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1922 /* We need to switch off panel power _and_ force vdd, for otherwise some
1923 * panels get very unhappy and cease to work. */
b3064154
PJ
1924 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1925 EDP_BLC_ENABLE);
453c5420 1926
bf13e81b 1927 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1928
849e39f5
PZ
1929 intel_dp->want_panel_vdd = false;
1930
453c5420
JB
1931 I915_WRITE(pp_ctrl_reg, pp);
1932 POSTING_READ(pp_ctrl_reg);
9934c132 1933
dce56b3c 1934 intel_dp->last_power_cycle = jiffies;
4be73780 1935 wait_panel_off(intel_dp);
849e39f5
PZ
1936
1937 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1938 power_domain = intel_display_port_power_domain(intel_encoder);
1939 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1940}
e39b999a 1941
9f0fb5be
VS
1942void intel_edp_panel_off(struct intel_dp *intel_dp)
1943{
1944 if (!is_edp(intel_dp))
1945 return;
e39b999a 1946
9f0fb5be
VS
1947 pps_lock(intel_dp);
1948 edp_panel_off(intel_dp);
773538e8 1949 pps_unlock(intel_dp);
9934c132
JB
1950}
1951
1250d107
JN
1952/* Enable backlight in the panel power control. */
1953static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1954{
da63a9f2
PZ
1955 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1956 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1958 u32 pp;
453c5420 1959 u32 pp_ctrl_reg;
32f9d658 1960
01cb9ea6
JB
1961 /*
1962 * If we enable the backlight right away following a panel power
1963 * on, we may see slight flicker as the panel syncs with the eDP
1964 * link. So delay a bit to make sure the image is solid before
1965 * allowing it to appear.
1966 */
4be73780 1967 wait_backlight_on(intel_dp);
e39b999a 1968
773538e8 1969 pps_lock(intel_dp);
e39b999a 1970
453c5420 1971 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1972 pp |= EDP_BLC_ENABLE;
453c5420 1973
bf13e81b 1974 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1975
1976 I915_WRITE(pp_ctrl_reg, pp);
1977 POSTING_READ(pp_ctrl_reg);
e39b999a 1978
773538e8 1979 pps_unlock(intel_dp);
32f9d658
ZW
1980}
1981
1250d107
JN
1982/* Enable backlight PWM and backlight PP control. */
1983void intel_edp_backlight_on(struct intel_dp *intel_dp)
1984{
1985 if (!is_edp(intel_dp))
1986 return;
1987
1988 DRM_DEBUG_KMS("\n");
1989
1990 intel_panel_enable_backlight(intel_dp->attached_connector);
1991 _intel_edp_backlight_on(intel_dp);
1992}
1993
1994/* Disable backlight in the panel power control. */
1995static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1996{
30add22d 1997 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1998 struct drm_i915_private *dev_priv = dev->dev_private;
1999 u32 pp;
453c5420 2000 u32 pp_ctrl_reg;
32f9d658 2001
f01eca2e
KP
2002 if (!is_edp(intel_dp))
2003 return;
2004
773538e8 2005 pps_lock(intel_dp);
e39b999a 2006
453c5420 2007 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2008 pp &= ~EDP_BLC_ENABLE;
453c5420 2009
bf13e81b 2010 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2011
2012 I915_WRITE(pp_ctrl_reg, pp);
2013 POSTING_READ(pp_ctrl_reg);
f7d2323c 2014
773538e8 2015 pps_unlock(intel_dp);
e39b999a
VS
2016
2017 intel_dp->last_backlight_off = jiffies;
f7d2323c 2018 edp_wait_backlight_off(intel_dp);
1250d107 2019}
f7d2323c 2020
1250d107
JN
2021/* Disable backlight PP control and backlight PWM. */
2022void intel_edp_backlight_off(struct intel_dp *intel_dp)
2023{
2024 if (!is_edp(intel_dp))
2025 return;
2026
2027 DRM_DEBUG_KMS("\n");
f7d2323c 2028
1250d107 2029 _intel_edp_backlight_off(intel_dp);
f7d2323c 2030 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2031}
a4fc5ed6 2032
73580fb7
JN
2033/*
2034 * Hook for controlling the panel power control backlight through the bl_power
2035 * sysfs attribute. Take care to handle multiple calls.
2036 */
2037static void intel_edp_backlight_power(struct intel_connector *connector,
2038 bool enable)
2039{
2040 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2041 bool is_enabled;
2042
773538e8 2043 pps_lock(intel_dp);
e39b999a 2044 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2045 pps_unlock(intel_dp);
73580fb7
JN
2046
2047 if (is_enabled == enable)
2048 return;
2049
23ba9373
JN
2050 DRM_DEBUG_KMS("panel power control backlight %s\n",
2051 enable ? "enable" : "disable");
73580fb7
JN
2052
2053 if (enable)
2054 _intel_edp_backlight_on(intel_dp);
2055 else
2056 _intel_edp_backlight_off(intel_dp);
2057}
2058
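/* Turn the CPU eDP PLL (DP_A) on; the attached pipe must still be
 * disabled at this point, as asserted below. */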
2bd2ad64 2059static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2060{
da63a9f2
PZ
2061 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2062 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2063 struct drm_device *dev = crtc->dev;
d240f20f
JB
2064 struct drm_i915_private *dev_priv = dev->dev_private;
2065 u32 dpa_ctl;
2066
2bd2ad64
DV
2067 assert_pipe_disabled(dev_priv,
2068 to_intel_crtc(crtc)->pipe);
2069
d240f20f
JB
2070 DRM_DEBUG_KMS("\n");
2071 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2072 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2073 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2074
2075 /* We don't adjust intel_dp->DP while tearing down the link, to
2076 * facilitate link retraining (e.g. after hotplug). Hence clear all
2077 * enable bits here to ensure that we don't enable too much. */
2078 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2079 intel_dp->DP |= DP_PLL_ENABLE;
2080 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2081 POSTING_READ(DP_A);
2082 udelay(200);
d240f20f
JB
2083}
2084
2bd2ad64 2085static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2086{
da63a9f2
PZ
2087 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2088 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2089 struct drm_device *dev = crtc->dev;
d240f20f
JB
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2091 u32 dpa_ctl;
2092
2bd2ad64
DV
2093 assert_pipe_disabled(dev_priv,
2094 to_intel_crtc(crtc)->pipe);
2095
d240f20f 2096 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2097 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2098 "dp pll off, should be on\n");
2099 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2100
2101 /* We can't rely on the value tracked for the DP register in
2102 * intel_dp->DP because link_down must not change that (otherwise link
2103	 * re-training will fail). */
298b0b39 2104 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2105 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2106 POSTING_READ(DP_A);
d240f20f
JB
2107 udelay(200);
2108}
2109
c7ad3810 2110/* If the sink supports it, try to set the power state appropriately */
c19b0669 2111void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2112{
2113 int ret, i;
2114
2115 /* Should have a valid DPCD by this point */
2116 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2117 return;
2118
2119 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2120 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2121 DP_SET_POWER_D3);
c7ad3810
JB
2122 } else {
2123 /*
2124 * When turning on, we need to retry for 1ms to give the sink
2125 * time to wake up.
2126 */
2127 for (i = 0; i < 3; i++) {
9d1a1031
JN
2128 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2129 DP_SET_POWER_D0);
c7ad3810
JB
2130 if (ret == 1)
2131 break;
2132 msleep(1);
2133 }
2134 }
f9cac721
JN
2135
2136 if (ret != 1)
2137 DRM_DEBUG_KMS("failed to %s sink power state\n",
2138 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2139}
2140
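/* Report whether the DP port is enabled and, if so, which pipe it is
 * driving. */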
19d8fe15
DV
2141static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2142 enum pipe *pipe)
d240f20f 2143{
19d8fe15 2144 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2145 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2146 struct drm_device *dev = encoder->base.dev;
2147 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2148 enum intel_display_power_domain power_domain;
2149 u32 tmp;
2150
2151 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2152 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2153 return false;
2154
2155 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2156
2157 if (!(tmp & DP_PORT_EN))
2158 return false;
2159
bc7d38a4 2160 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2161 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2162 } else if (IS_CHERRYVIEW(dev)) {
2163 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2164 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2165 *pipe = PORT_TO_PIPE(tmp);
2166 } else {
2167 u32 trans_sel;
2168 u32 trans_dp;
2169 int i;
2170
2171 switch (intel_dp->output_reg) {
2172 case PCH_DP_B:
2173 trans_sel = TRANS_DP_PORT_SEL_B;
2174 break;
2175 case PCH_DP_C:
2176 trans_sel = TRANS_DP_PORT_SEL_C;
2177 break;
2178 case PCH_DP_D:
2179 trans_sel = TRANS_DP_PORT_SEL_D;
2180 break;
2181 default:
2182 return true;
2183 }
2184
055e393f 2185 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2186 trans_dp = I915_READ(TRANS_DP_CTL(i));
2187 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2188 *pipe = i;
2189 return true;
2190 }
2191 }
19d8fe15 2192
4a0833ec
DV
2193 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2194 intel_dp->output_reg);
2195 }
d240f20f 2196
19d8fe15
DV
2197 return true;
2198}
d240f20f 2199
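/* Read the current hardware state back into pipe_config: sync polarities,
 * audio enable, color range, link clock and M/N values. */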
045ac3b5 2200static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2201 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2202{
2203 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2204 u32 tmp, flags = 0;
63000ef6
XZ
2205 struct drm_device *dev = encoder->base.dev;
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2207 enum port port = dp_to_dig_port(intel_dp)->port;
2208 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2209 int dotclock;
045ac3b5 2210
9ed109a7
DV
2211 tmp = I915_READ(intel_dp->output_reg);
2212 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2213 pipe_config->has_audio = true;
2214
63000ef6 2215 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2216 if (tmp & DP_SYNC_HS_HIGH)
2217 flags |= DRM_MODE_FLAG_PHSYNC;
2218 else
2219 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2220
63000ef6
XZ
2221 if (tmp & DP_SYNC_VS_HIGH)
2222 flags |= DRM_MODE_FLAG_PVSYNC;
2223 else
2224 flags |= DRM_MODE_FLAG_NVSYNC;
2225 } else {
2226 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2227 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2228 flags |= DRM_MODE_FLAG_PHSYNC;
2229 else
2230 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2231
63000ef6
XZ
2232 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2233 flags |= DRM_MODE_FLAG_PVSYNC;
2234 else
2235 flags |= DRM_MODE_FLAG_NVSYNC;
2236 }
045ac3b5 2237
2d112de7 2238 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2239
8c875fca
VS
2240 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2241 tmp & DP_COLOR_RANGE_16_235)
2242 pipe_config->limited_color_range = true;
2243
eb14cb74
VS
2244 pipe_config->has_dp_encoder = true;
2245
2246 intel_dp_get_m_n(crtc, pipe_config);
2247
18442d08 2248 if (port == PORT_A) {
f1f644dc
JB
2249 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2250 pipe_config->port_clock = 162000;
2251 else
2252 pipe_config->port_clock = 270000;
2253 }
18442d08
VS
2254
2255 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2256 &pipe_config->dp_m_n);
2257
2258 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2259 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2260
2d112de7 2261 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2262
c6cd2ee2
JN
2263 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2264 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2265 /*
2266 * This is a big fat ugly hack.
2267 *
2268 * Some machines in UEFI boot mode provide us a VBT that has 18
2269 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2270 * unknown we fail to light up. Yet the same BIOS boots up with
2271 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2272 * max, not what it tells us to use.
2273 *
2274 * Note: This will still be broken if the eDP panel is not lit
2275 * up by the BIOS, and thus we can't get the mode at module
2276 * load.
2277 */
2278 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2279 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2280 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2281 }
045ac3b5
JB
2282}
2283
e8cb4558 2284static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2285{
e8cb4558 2286 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2287 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2288 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2289
6e3c9717 2290 if (crtc->config->has_audio)
495a5bb8 2291 intel_audio_codec_disable(encoder);
6cb49835 2292
b32c6f48
RV
2293 if (HAS_PSR(dev) && !HAS_DDI(dev))
2294 intel_psr_disable(intel_dp);
2295
6cb49835
DV
2296 /* Make sure the panel is off before trying to change the mode. But also
2297 * ensure that we have vdd while we switch off the panel. */
24f3e092 2298 intel_edp_panel_vdd_on(intel_dp);
4be73780 2299 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2300 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2301 intel_edp_panel_off(intel_dp);
3739850b 2302
08aff3fe
VS
2303 /* disable the port before the pipe on g4x */
2304 if (INTEL_INFO(dev)->gen < 5)
3739850b 2305 intel_dp_link_down(intel_dp);
d240f20f
JB
2306}
2307
08aff3fe 2308static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2309{
2bd2ad64 2310 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2311 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2312
49277c31 2313 intel_dp_link_down(intel_dp);
08aff3fe
VS
2314 if (port == PORT_A)
2315 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2316}
2317
2318static void vlv_post_disable_dp(struct intel_encoder *encoder)
2319{
2320 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2321
2322 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2323}
2324
580d3811
VS
2325static void chv_post_disable_dp(struct intel_encoder *encoder)
2326{
2327 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2328 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2329 struct drm_device *dev = encoder->base.dev;
2330 struct drm_i915_private *dev_priv = dev->dev_private;
2331 struct intel_crtc *intel_crtc =
2332 to_intel_crtc(encoder->base.crtc);
2333 enum dpio_channel ch = vlv_dport_to_channel(dport);
2334 enum pipe pipe = intel_crtc->pipe;
2335 u32 val;
2336
2337 intel_dp_link_down(intel_dp);
2338
2339 mutex_lock(&dev_priv->dpio_lock);
2340
2341 /* Propagate soft reset to data lane reset */
97fd4d5c 2342 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2343 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2344 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2345
97fd4d5c
VS
2346 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2347 val |= CHV_PCS_REQ_SOFTRESET_EN;
2348 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2349
2350 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2351 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2352 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2353
2354 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2355 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2356 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2357
2358 mutex_unlock(&dev_priv->dpio_lock);
2359}
2360
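/* Program the requested training pattern: through DP_TP_CTL on DDI
 * platforms, otherwise by updating the port register value in *DP. */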
7b13b58a
VS
2361static void
2362_intel_dp_set_link_train(struct intel_dp *intel_dp,
2363 uint32_t *DP,
2364 uint8_t dp_train_pat)
2365{
2366 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2367 struct drm_device *dev = intel_dig_port->base.base.dev;
2368 struct drm_i915_private *dev_priv = dev->dev_private;
2369 enum port port = intel_dig_port->port;
2370
2371 if (HAS_DDI(dev)) {
2372 uint32_t temp = I915_READ(DP_TP_CTL(port));
2373
2374 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2375 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2376 else
2377 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2378
2379 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2380 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2381 case DP_TRAINING_PATTERN_DISABLE:
2382 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2383
2384 break;
2385 case DP_TRAINING_PATTERN_1:
2386 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2387 break;
2388 case DP_TRAINING_PATTERN_2:
2389 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2390 break;
2391 case DP_TRAINING_PATTERN_3:
2392 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2393 break;
2394 }
2395 I915_WRITE(DP_TP_CTL(port), temp);
2396
2397 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2398 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2399
2400 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2401 case DP_TRAINING_PATTERN_DISABLE:
2402 *DP |= DP_LINK_TRAIN_OFF_CPT;
2403 break;
2404 case DP_TRAINING_PATTERN_1:
2405 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2406 break;
2407 case DP_TRAINING_PATTERN_2:
2408 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2409 break;
2410 case DP_TRAINING_PATTERN_3:
2411 DRM_ERROR("DP training pattern 3 not supported\n");
2412 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2413 break;
2414 }
2415
2416 } else {
2417 if (IS_CHERRYVIEW(dev))
2418 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2419 else
2420 *DP &= ~DP_LINK_TRAIN_MASK;
2421
2422 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2423 case DP_TRAINING_PATTERN_DISABLE:
2424 *DP |= DP_LINK_TRAIN_OFF;
2425 break;
2426 case DP_TRAINING_PATTERN_1:
2427 *DP |= DP_LINK_TRAIN_PAT_1;
2428 break;
2429 case DP_TRAINING_PATTERN_2:
2430 *DP |= DP_LINK_TRAIN_PAT_2;
2431 break;
2432 case DP_TRAINING_PATTERN_3:
2433 if (IS_CHERRYVIEW(dev)) {
2434 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2435 } else {
2436 DRM_ERROR("DP training pattern 3 not supported\n");
2437 *DP |= DP_LINK_TRAIN_PAT_2;
2438 }
2439 break;
2440 }
2441 }
2442}
2443
2444static void intel_dp_enable_port(struct intel_dp *intel_dp)
2445{
2446 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2447 struct drm_i915_private *dev_priv = dev->dev_private;
2448
7b13b58a
VS
2449 /* enable with pattern 1 (as per spec) */
2450 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2451 DP_TRAINING_PATTERN_1);
2452
2453 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2454 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2455
2456 /*
2457 * Magic for VLV/CHV. We _must_ first set up the register
2458 * without actually enabling the port, and then do another
2459 * write to enable the port. Otherwise link training will
2460 * fail when the power sequencer is freshly used for this port.
2461 */
2462 intel_dp->DP |= DP_PORT_EN;
2463
2464 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2465 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2466}
2467
e8cb4558 2468static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2469{
e8cb4558
DV
2470 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2471 struct drm_device *dev = encoder->base.dev;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2473 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2474 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2475
0c33d8d7
DV
2476 if (WARN_ON(dp_reg & DP_PORT_EN))
2477 return;
5d613501 2478
093e3f13
VS
2479 pps_lock(intel_dp);
2480
2481 if (IS_VALLEYVIEW(dev))
2482 vlv_init_panel_power_sequencer(intel_dp);
2483
7b13b58a 2484 intel_dp_enable_port(intel_dp);
093e3f13
VS
2485
2486 edp_panel_vdd_on(intel_dp);
2487 edp_panel_on(intel_dp);
2488 edp_panel_vdd_off(intel_dp, true);
2489
2490 pps_unlock(intel_dp);
2491
61234fa5
VS
2492 if (IS_VALLEYVIEW(dev))
2493 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2494
f01eca2e 2495 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2496 intel_dp_start_link_train(intel_dp);
33a34e4e 2497 intel_dp_complete_link_train(intel_dp);
3ab9c637 2498 intel_dp_stop_link_train(intel_dp);
c1dec79a 2499
6e3c9717 2500 if (crtc->config->has_audio) {
c1dec79a
JN
2501 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2502 pipe_name(crtc->pipe));
2503 intel_audio_codec_enable(encoder);
2504 }
ab1f90f9 2505}
89b667f8 2506
ecff4f3b
JN
2507static void g4x_enable_dp(struct intel_encoder *encoder)
2508{
828f5c6e
JN
2509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2510
ecff4f3b 2511 intel_enable_dp(encoder);
4be73780 2512 intel_edp_backlight_on(intel_dp);
ab1f90f9 2513}
89b667f8 2514
ab1f90f9
JN
2515static void vlv_enable_dp(struct intel_encoder *encoder)
2516{
828f5c6e
JN
2517 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2518
4be73780 2519 intel_edp_backlight_on(intel_dp);
b32c6f48 2520 intel_psr_enable(intel_dp);
d240f20f
JB
2521}
2522
ecff4f3b 2523static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2524{
2525 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2526 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2527
8ac33ed3
DV
2528 intel_dp_prepare(encoder);
2529
d41f1efb
DV
2530 /* Only ilk+ has port A */
2531 if (dport->port == PORT_A) {
2532 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2533 ironlake_edp_pll_on(intel_dp);
d41f1efb 2534 }
ab1f90f9
JN
2535}
2536
83b84597
VS
2537static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2538{
2539 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2540 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2541 enum pipe pipe = intel_dp->pps_pipe;
2542 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2543
2544 edp_panel_vdd_off_sync(intel_dp);
2545
2546 /*
2547	 * VLV seems to get confused when multiple power sequencers
2548	 * have the same port selected (even if only one has power/vdd
2549	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2550	 * CHV on the other hand doesn't seem to mind having the same port
2551	 * selected in multiple power sequencers, but let's always clear the
2552	 * port select when logically disconnecting a power sequencer
2553	 * from a port.
2554 */
2555 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2556 pipe_name(pipe), port_name(intel_dig_port->port));
2557 I915_WRITE(pp_on_reg, 0);
2558 POSTING_READ(pp_on_reg);
2559
2560 intel_dp->pps_pipe = INVALID_PIPE;
2561}
2562
a4a5d2f8
VS
2563static void vlv_steal_power_sequencer(struct drm_device *dev,
2564 enum pipe pipe)
2565{
2566 struct drm_i915_private *dev_priv = dev->dev_private;
2567 struct intel_encoder *encoder;
2568
2569 lockdep_assert_held(&dev_priv->pps_mutex);
2570
ac3c12e4
VS
2571 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2572 return;
2573
a4a5d2f8
VS
2574 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2575 base.head) {
2576 struct intel_dp *intel_dp;
773538e8 2577 enum port port;
a4a5d2f8
VS
2578
2579 if (encoder->type != INTEL_OUTPUT_EDP)
2580 continue;
2581
2582 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2583 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2584
2585 if (intel_dp->pps_pipe != pipe)
2586 continue;
2587
2588 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2589 pipe_name(pipe), port_name(port));
a4a5d2f8 2590
034e43c6
VS
2591 WARN(encoder->connectors_active,
2592 "stealing pipe %c power sequencer from active eDP port %c\n",
2593 pipe_name(pipe), port_name(port));
a4a5d2f8 2594
a4a5d2f8 2595 /* make sure vdd is off before we steal it */
83b84597 2596 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2597 }
2598}
2599
2600static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2601{
2602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2603 struct intel_encoder *encoder = &intel_dig_port->base;
2604 struct drm_device *dev = encoder->base.dev;
2605 struct drm_i915_private *dev_priv = dev->dev_private;
2606 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2607
2608 lockdep_assert_held(&dev_priv->pps_mutex);
2609
093e3f13
VS
2610 if (!is_edp(intel_dp))
2611 return;
2612
a4a5d2f8
VS
2613 if (intel_dp->pps_pipe == crtc->pipe)
2614 return;
2615
2616 /*
2617 * If another power sequencer was being used on this
2618 * port previously make sure to turn off vdd there while
2619 * we still have control of it.
2620 */
2621 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2622 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2623
2624 /*
2625 * We may be stealing the power
2626 * sequencer from another port.
2627 */
2628 vlv_steal_power_sequencer(dev, crtc->pipe);
2629
2630 /* now it's all ours */
2631 intel_dp->pps_pipe = crtc->pipe;
2632
2633 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2634 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2635
2636 /* init power sequencer on this pipe and port */
36b5f425
VS
2637 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2638 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2639}
2640
ab1f90f9 2641static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2642{
2bd2ad64 2643 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2644 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2645 struct drm_device *dev = encoder->base.dev;
89b667f8 2646 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2647 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2648 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2649 int pipe = intel_crtc->pipe;
2650 u32 val;
a4fc5ed6 2651
ab1f90f9 2652 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2653
ab3c759a 2654 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2655 val = 0;
2656 if (pipe)
2657 val |= (1<<21);
2658 else
2659 val &= ~(1<<21);
2660 val |= 0x001000c4;
ab3c759a
CML
2661 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2664
ab1f90f9
JN
2665 mutex_unlock(&dev_priv->dpio_lock);
2666
2667 intel_enable_dp(encoder);
89b667f8
JB
2668}
2669
ecff4f3b 2670static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2671{
2672 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2673 struct drm_device *dev = encoder->base.dev;
2674 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2675 struct intel_crtc *intel_crtc =
2676 to_intel_crtc(encoder->base.crtc);
e4607fcf 2677 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2678 int pipe = intel_crtc->pipe;
89b667f8 2679
8ac33ed3
DV
2680 intel_dp_prepare(encoder);
2681
89b667f8 2682 /* Program Tx lane resets to default */
0980a60f 2683 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2684 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2685 DPIO_PCS_TX_LANE2_RESET |
2686 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2687 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2688 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2689 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2690 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2691 DPIO_PCS_CLK_SOFT_RESET);
2692
2693 /* Fix up inter-pair skew failure */
ab3c759a
CML
2694 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2695 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2696 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2697 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2698}
2699
e4a1d846
CML
2700static void chv_pre_enable_dp(struct intel_encoder *encoder)
2701{
2702 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2703 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2704 struct drm_device *dev = encoder->base.dev;
2705 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2706 struct intel_crtc *intel_crtc =
2707 to_intel_crtc(encoder->base.crtc);
2708 enum dpio_channel ch = vlv_dport_to_channel(dport);
2709 int pipe = intel_crtc->pipe;
2710 int data, i;
949c1d43 2711 u32 val;
e4a1d846 2712
e4a1d846 2713 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2714
570e2a74
VS
2715 /* allow hardware to manage TX FIFO reset source */
2716 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2717 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2718 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2719
2720 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2721 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2722 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2723
949c1d43 2724 /* Deassert soft data lane reset*/
97fd4d5c 2725 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2726 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2727 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2728
2729 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2730 val |= CHV_PCS_REQ_SOFTRESET_EN;
2731 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2732
2733 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2734 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2735 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2736
97fd4d5c 2737 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2738 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2739 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2740
2741 /* Program Tx lane latency optimal setting*/
e4a1d846
CML
2742 for (i = 0; i < 4; i++) {
2743 /* Set the latency optimal bit */
2744 data = (i == 1) ? 0x0 : 0x6;
2745 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2746 data << DPIO_FRC_LATENCY_SHFIT);
2747
2748 /* Set the upar bit */
2749 data = (i == 1) ? 0x0 : 0x1;
2750 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2751 data << DPIO_UPAR_SHIFT);
2752 }
2753
2754 /* Data lane stagger programming */
2755 /* FIXME: Fix up value only after power analysis */
2756
2757 mutex_unlock(&dev_priv->dpio_lock);
2758
e4a1d846 2759 intel_enable_dp(encoder);
e4a1d846
CML
2760}
2761
9197c88b
VS
2762static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2763{
2764 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2765 struct drm_device *dev = encoder->base.dev;
2766 struct drm_i915_private *dev_priv = dev->dev_private;
2767 struct intel_crtc *intel_crtc =
2768 to_intel_crtc(encoder->base.crtc);
2769 enum dpio_channel ch = vlv_dport_to_channel(dport);
2770 enum pipe pipe = intel_crtc->pipe;
2771 u32 val;
2772
625695f8
VS
2773 intel_dp_prepare(encoder);
2774
9197c88b
VS
2775 mutex_lock(&dev_priv->dpio_lock);
2776
b9e5ac3c
VS
2777 /* program left/right clock distribution */
2778 if (pipe != PIPE_B) {
2779 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2780 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2781 if (ch == DPIO_CH0)
2782 val |= CHV_BUFLEFTENA1_FORCE;
2783 if (ch == DPIO_CH1)
2784 val |= CHV_BUFRIGHTENA1_FORCE;
2785 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2786 } else {
2787 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2788 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2789 if (ch == DPIO_CH0)
2790 val |= CHV_BUFLEFTENA2_FORCE;
2791 if (ch == DPIO_CH1)
2792 val |= CHV_BUFRIGHTENA2_FORCE;
2793 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2794 }
2795
9197c88b
VS
2796 /* program clock channel usage */
2797 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2798 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2799 if (pipe != PIPE_B)
2800 val &= ~CHV_PCS_USEDCLKCHANNEL;
2801 else
2802 val |= CHV_PCS_USEDCLKCHANNEL;
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2804
2805 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2806 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2807 if (pipe != PIPE_B)
2808 val &= ~CHV_PCS_USEDCLKCHANNEL;
2809 else
2810 val |= CHV_PCS_USEDCLKCHANNEL;
2811 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2812
2813 /*
2814	 * This is a bit weird since generally CL
2815 * matches the pipe, but here we need to
2816 * pick the CL based on the port.
2817 */
2818 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2819 if (pipe != PIPE_B)
2820 val &= ~CHV_CMN_USEDCLKCHANNEL;
2821 else
2822 val |= CHV_CMN_USEDCLKCHANNEL;
2823 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2824
2825 mutex_unlock(&dev_priv->dpio_lock);
2826}
2827
a4fc5ed6 2828/*
df0c237d
JB
2829 * Native read with retry for link status and receiver capability reads for
2830 * cases where the sink may still be asleep.
9d1a1031
JN
2831 *
2832 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2833 * supposed to retry 3 times per the spec.
a4fc5ed6 2834 */
9d1a1031
JN
2835static ssize_t
2836intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2837 void *buffer, size_t size)
a4fc5ed6 2838{
9d1a1031
JN
2839 ssize_t ret;
2840 int i;
61da5fab 2841
f6a19066
VS
2842 /*
2843	 * Sometimes we just get the same incorrect byte repeated
2844	 * over the entire buffer. Doing just one throw-away read
2845 * initially seems to "solve" it.
2846 */
2847 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2848
61da5fab 2849 for (i = 0; i < 3; i++) {
9d1a1031
JN
2850 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2851 if (ret == size)
2852 return ret;
61da5fab
JB
2853 msleep(1);
2854 }
a4fc5ed6 2855
9d1a1031 2856 return ret;
a4fc5ed6
KP
2857}
2858
2859/*
2860 * Fetch AUX CH registers 0x202 - 0x207 which contain
2861 * link status information
2862 */
2863static bool
93f62dad 2864intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2865{
9d1a1031
JN
2866 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2867 DP_LANE0_1_STATUS,
2868 link_status,
2869 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2870}
2871
1100244e 2872/* These are source-specific values. */
a4fc5ed6 2873static uint8_t
1a2eb460 2874intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2875{
30add22d 2876 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2877 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2878 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2879
7ad14a29
SJ
2880 if (INTEL_INFO(dev)->gen >= 9) {
2881 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2882 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2883 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2884 } else if (IS_VALLEYVIEW(dev))
bd60018a 2885 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2886 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2887 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2888 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2889 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2890 else
bd60018a 2891 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2892}
2893
2894static uint8_t
2895intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2896{
30add22d 2897 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2898 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2899
5a9d1f1a
DL
2900 if (INTEL_INFO(dev)->gen >= 9) {
2901 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2902 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2903 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2905 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2906 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2907 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2908 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2909 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2910 default:
2911 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2912 }
2913 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2914 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2915 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2916 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2918 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2919 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2920 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2922 default:
bd60018a 2923 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2924 }
e2fa6fba
P
2925 } else if (IS_VALLEYVIEW(dev)) {
2926 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2928 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2930 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2932 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2934 default:
bd60018a 2935 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2936 }
bc7d38a4 2937 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2938 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2940 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2942 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2943 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2944 default:
bd60018a 2945 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2946 }
2947 } else {
2948 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2949 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2950 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2953 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2956 default:
bd60018a 2957 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2958 }
a4fc5ed6
KP
2959 }
2960}
2961
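/* Translate the selected voltage swing / pre-emphasis level into VLV PHY
 * settings and program them through DPIO. */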
e2fa6fba
P
2962static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2963{
2964 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2965 struct drm_i915_private *dev_priv = dev->dev_private;
2966 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2967 struct intel_crtc *intel_crtc =
2968 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2969 unsigned long demph_reg_value, preemph_reg_value,
2970 uniqtranscale_reg_value;
2971 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2972 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2973 int pipe = intel_crtc->pipe;
e2fa6fba
P
2974
2975 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2976 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2977 preemph_reg_value = 0x0004000;
2978 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2980 demph_reg_value = 0x2B405555;
2981 uniqtranscale_reg_value = 0x552AB83A;
2982 break;
bd60018a 2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2984 demph_reg_value = 0x2B404040;
2985 uniqtranscale_reg_value = 0x5548B83A;
2986 break;
bd60018a 2987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2988 demph_reg_value = 0x2B245555;
2989 uniqtranscale_reg_value = 0x5560B83A;
2990 break;
bd60018a 2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2992 demph_reg_value = 0x2B405555;
2993 uniqtranscale_reg_value = 0x5598DA3A;
2994 break;
2995 default:
2996 return 0;
2997 }
2998 break;
bd60018a 2999 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3000 preemph_reg_value = 0x0002000;
3001 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3003 demph_reg_value = 0x2B404040;
3004 uniqtranscale_reg_value = 0x5552B83A;
3005 break;
bd60018a 3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3007 demph_reg_value = 0x2B404848;
3008 uniqtranscale_reg_value = 0x5580B83A;
3009 break;
bd60018a 3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3011 demph_reg_value = 0x2B404040;
3012 uniqtranscale_reg_value = 0x55ADDA3A;
3013 break;
3014 default:
3015 return 0;
3016 }
3017 break;
bd60018a 3018 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3019 preemph_reg_value = 0x0000000;
3020 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3022 demph_reg_value = 0x2B305555;
3023 uniqtranscale_reg_value = 0x5570B83A;
3024 break;
bd60018a 3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3026 demph_reg_value = 0x2B2B4040;
3027 uniqtranscale_reg_value = 0x55ADDA3A;
3028 break;
3029 default:
3030 return 0;
3031 }
3032 break;
bd60018a 3033 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3034 preemph_reg_value = 0x0006000;
3035 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3037 demph_reg_value = 0x1B405555;
3038 uniqtranscale_reg_value = 0x55ADDA3A;
3039 break;
3040 default:
3041 return 0;
3042 }
3043 break;
3044 default:
3045 return 0;
3046 }
3047
0980a60f 3048 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3049 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3050 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3051 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3052 uniqtranscale_reg_value);
ab3c759a
CML
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3054 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3055 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3056 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3057 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3058
3059 return 0;
3060}
3061
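/* CHV equivalent: convert the requested swing/pre-emphasis into de-emphasis
 * and margin values and program the PHY lanes through DPIO. */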
e4a1d846
CML
3062static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3063{
3064 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3065 struct drm_i915_private *dev_priv = dev->dev_private;
3066 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3067 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3068 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3069 uint8_t train_set = intel_dp->train_set[0];
3070 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3071 enum pipe pipe = intel_crtc->pipe;
3072 int i;
e4a1d846
CML
3073
3074 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3075 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3076 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3077 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3078 deemph_reg_value = 128;
3079 margin_reg_value = 52;
3080 break;
bd60018a 3081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3082 deemph_reg_value = 128;
3083 margin_reg_value = 77;
3084 break;
bd60018a 3085 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3086 deemph_reg_value = 128;
3087 margin_reg_value = 102;
3088 break;
bd60018a 3089 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3090 deemph_reg_value = 128;
3091 margin_reg_value = 154;
3092 /* FIXME extra to set for 1200 */
3093 break;
3094 default:
3095 return 0;
3096 }
3097 break;
bd60018a 3098 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3099 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3101 deemph_reg_value = 85;
3102 margin_reg_value = 78;
3103 break;
bd60018a 3104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3105 deemph_reg_value = 85;
3106 margin_reg_value = 116;
3107 break;
bd60018a 3108 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3109 deemph_reg_value = 85;
3110 margin_reg_value = 154;
3111 break;
3112 default:
3113 return 0;
3114 }
3115 break;
bd60018a 3116 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3117 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3119 deemph_reg_value = 64;
3120 margin_reg_value = 104;
3121 break;
bd60018a 3122 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3123 deemph_reg_value = 64;
3124 margin_reg_value = 154;
3125 break;
3126 default:
3127 return 0;
3128 }
3129 break;
bd60018a 3130 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3131 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3133 deemph_reg_value = 43;
3134 margin_reg_value = 154;
3135 break;
3136 default:
3137 return 0;
3138 }
3139 break;
3140 default:
3141 return 0;
3142 }
3143
3144 mutex_lock(&dev_priv->dpio_lock);
3145
3146 /* Clear calc init */
1966e59e
VS
3147 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3148 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3149 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3150 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3151 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3152
3153 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3154 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3155 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3156 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3157 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3158
a02ef3c7
VS
3159 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3160 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3161 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3162 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3163
3164 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3165 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3166 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3167 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3168
e4a1d846 3169 /* Program swing deemph */
f72df8db
VS
3170 for (i = 0; i < 4; i++) {
3171 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3172 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3173 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3174 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3175 }
e4a1d846
CML
3176
3177 /* Program swing margin */
f72df8db
VS
3178 for (i = 0; i < 4; i++) {
3179 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3180 val &= ~DPIO_SWING_MARGIN000_MASK;
3181 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3182 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3183 }
e4a1d846
CML
3184
3185 /* Disable unique transition scale */
f72df8db
VS
3186 for (i = 0; i < 4; i++) {
3187 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3188 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3189 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3190 }
e4a1d846
CML
3191
3192 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3193 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3194 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3195 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3196
3197 /*
3198 * The document said it needs to set bit 27 for ch0 and bit 26
3199 * for ch1. Might be a typo in the doc.
3200 * For now, for this unique transition scale selection, set bit
3201 * 27 for ch0 and ch1.
3202 */
f72df8db
VS
3203 for (i = 0; i < 4; i++) {
3204 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3205 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3206 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3207 }
e4a1d846 3208
f72df8db
VS
3209 for (i = 0; i < 4; i++) {
3210 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3211 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3212 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3213 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3214 }
e4a1d846
CML
3215 }
3216
3217 /* Start swing calculation */
1966e59e
VS
3218 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3219 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3220 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3221
3222 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3223 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3224 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3225
3226 /* LRC Bypass */
3227 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3228 val |= DPIO_LRC_BYPASS;
3229 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3230
3231 mutex_unlock(&dev_priv->dpio_lock);
3232
3233 return 0;
3234}
3235
a4fc5ed6 3236static void
0301b3ac
JN
3237intel_get_adjust_train(struct intel_dp *intel_dp,
3238 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3239{
3240 uint8_t v = 0;
3241 uint8_t p = 0;
3242 int lane;
1a2eb460
KP
3243 uint8_t voltage_max;
3244 uint8_t preemph_max;
a4fc5ed6 3245
33a34e4e 3246 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3247 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3248 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3249
3250 if (this_v > v)
3251 v = this_v;
3252 if (this_p > p)
3253 p = this_p;
3254 }
3255
1a2eb460 3256 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3257 if (v >= voltage_max)
3258 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3259
1a2eb460
KP
3260 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3261 if (p >= preemph_max)
3262 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3263
3264 for (lane = 0; lane < 4; lane++)
33a34e4e 3265 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3266}
3267
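
For reference, a minimal standalone sketch of the clamping rule applied above: the highest per-lane adjust request wins, and once the source maximum is hit the corresponding MAX_*_REACHED flag is set so the sink stops asking for more. The constants are the standard DPCD training bits from drm_dp_helper.h; the helper itself is illustrative and not part of this file.

#include <stdint.h>

#define DP_TRAIN_MAX_SWING_REACHED		(1 << 2)
#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED	(1 << 5)

/* Cap a requested training level at the source limit and flag saturation. */
static uint8_t clamp_train_level(uint8_t requested, uint8_t source_max,
				 uint8_t reached_flag)
{
	if (requested >= source_max)
		return source_max | reached_flag;
	return requested;
}
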
3268static uint32_t
f0a3424e 3269intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3270{
3cf2efb1 3271 uint32_t signal_levels = 0;
a4fc5ed6 3272
3cf2efb1 3273 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3275 default:
3276 signal_levels |= DP_VOLTAGE_0_4;
3277 break;
bd60018a 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3279 signal_levels |= DP_VOLTAGE_0_6;
3280 break;
bd60018a 3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3282 signal_levels |= DP_VOLTAGE_0_8;
3283 break;
bd60018a 3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3285 signal_levels |= DP_VOLTAGE_1_2;
3286 break;
3287 }
3cf2efb1 3288 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3289 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3290 default:
3291 signal_levels |= DP_PRE_EMPHASIS_0;
3292 break;
bd60018a 3293 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3294 signal_levels |= DP_PRE_EMPHASIS_3_5;
3295 break;
bd60018a 3296 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3297 signal_levels |= DP_PRE_EMPHASIS_6;
3298 break;
bd60018a 3299 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3300 signal_levels |= DP_PRE_EMPHASIS_9_5;
3301 break;
3302 }
3303 return signal_levels;
3304}
3305
e3421a18
ZW
3306/* Gen6's DP voltage swing and pre-emphasis control */
3307static uint32_t
3308intel_gen6_edp_signal_levels(uint8_t train_set)
3309{
3c5a62b5
YL
3310 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3311 DP_TRAIN_PRE_EMPHASIS_MASK);
3312 switch (signal_levels) {
bd60018a
SJ
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3315 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3317 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3320 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3323 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3326 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3327 default:
3c5a62b5
YL
3328 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3329 "0x%x\n", signal_levels);
3330 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3331 }
3332}
3333
1a2eb460
KP
3334/* Gen7's DP voltage swing and pre-emphasis control */
3335static uint32_t
3336intel_gen7_edp_signal_levels(uint8_t train_set)
3337{
3338 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3339 DP_TRAIN_PRE_EMPHASIS_MASK);
3340 switch (signal_levels) {
bd60018a 3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3342 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3344 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3346 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3347
bd60018a 3348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3349 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3351 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3352
bd60018a 3353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3354 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3355 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3356 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3357
3358 default:
3359 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3360 "0x%x\n", signal_levels);
3361 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3362 }
3363}
3364
d6c0d722
PZ
3365/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3366static uint32_t
f0a3424e 3367intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3368{
d6c0d722
PZ
3369 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3370 DP_TRAIN_PRE_EMPHASIS_MASK);
3371 switch (signal_levels) {
bd60018a 3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3373 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3375 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3377 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3379 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3380
bd60018a 3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3382 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3384 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3386 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3387
bd60018a 3388 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3389 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3391 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3392
3393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3394 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3395 default:
3396 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3397 "0x%x\n", signal_levels);
c5fe6a06 3398 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3399 }
a4fc5ed6
KP
3400}
3401
f0a3424e
PZ
3402/* Properly updates "DP" with the correct signal levels. */
3403static void
3404intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3405{
3406 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3407 enum port port = intel_dig_port->port;
f0a3424e
PZ
3408 struct drm_device *dev = intel_dig_port->base.base.dev;
3409 uint32_t signal_levels, mask;
3410 uint8_t train_set = intel_dp->train_set[0];
3411
5a9d1f1a 3412 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3413 signal_levels = intel_hsw_signal_levels(train_set);
3414 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3415 } else if (IS_CHERRYVIEW(dev)) {
3416 signal_levels = intel_chv_signal_levels(intel_dp);
3417 mask = 0;
e2fa6fba
P
3418 } else if (IS_VALLEYVIEW(dev)) {
3419 signal_levels = intel_vlv_signal_levels(intel_dp);
3420 mask = 0;
bc7d38a4 3421 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3422 signal_levels = intel_gen7_edp_signal_levels(train_set);
3423 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3424 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3425 signal_levels = intel_gen6_edp_signal_levels(train_set);
3426 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3427 } else {
3428 signal_levels = intel_gen4_signal_levels(train_set);
3429 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3430 }
3431
3432 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3433
3434 *DP = (*DP & ~mask) | signal_levels;
3435}
3436
a4fc5ed6 3437static bool
ea5b213a 3438intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3439 uint32_t *DP,
58e10eb9 3440 uint8_t dp_train_pat)
a4fc5ed6 3441{
174edf1f
PZ
3442 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3443 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3444 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3445 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3446 int ret, len;
a4fc5ed6 3447
7b13b58a 3448 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3449
70aff66c 3450 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3451 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3452
2cdfe6c8
JN
3453 buf[0] = dp_train_pat;
3454 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3455 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3456 /* don't write DP_TRAINING_LANEx_SET on disable */
3457 len = 1;
3458 } else {
3459 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3460 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3461 len = intel_dp->lane_count + 1;
47ea7542 3462 }
a4fc5ed6 3463
9d1a1031
JN
3464 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3465 buf, len);
2cdfe6c8
JN
3466
3467 return ret == len;
a4fc5ed6
KP
3468}
3469
70aff66c
JN
3470static bool
3471intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3472 uint8_t dp_train_pat)
3473{
953d22e8 3474 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3475 intel_dp_set_signal_levels(intel_dp, DP);
3476 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3477}
3478
3479static bool
3480intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3481 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3482{
3483 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3484 struct drm_device *dev = intel_dig_port->base.base.dev;
3485 struct drm_i915_private *dev_priv = dev->dev_private;
3486 int ret;
3487
3488 intel_get_adjust_train(intel_dp, link_status);
3489 intel_dp_set_signal_levels(intel_dp, DP);
3490
3491 I915_WRITE(intel_dp->output_reg, *DP);
3492 POSTING_READ(intel_dp->output_reg);
3493
9d1a1031
JN
3494 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3495 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3496
3497 return ret == intel_dp->lane_count;
3498}
3499
3ab9c637
ID
3500static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3501{
3502 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3503 struct drm_device *dev = intel_dig_port->base.base.dev;
3504 struct drm_i915_private *dev_priv = dev->dev_private;
3505 enum port port = intel_dig_port->port;
3506 uint32_t val;
3507
3508 if (!HAS_DDI(dev))
3509 return;
3510
3511 val = I915_READ(DP_TP_CTL(port));
3512 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3513 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3514 I915_WRITE(DP_TP_CTL(port), val);
3515
3516 /*
3517 * On PORT_A we can have only eDP in SST mode. There the only reason
3518 * we need to set idle transmission mode is to work around a HW issue
3519 * where we enable the pipe while not in idle link-training mode.
3520 * In this case there is a requirement to wait for a minimum number of
3521 * idle patterns to be sent.
3522 */
3523 if (port == PORT_A)
3524 return;
3525
3526 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3527 1))
3528 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3529}
3530
33a34e4e 3531/* Enable corresponding port and start training pattern 1 */
c19b0669 3532void
33a34e4e 3533intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3534{
da63a9f2 3535 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3536 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3537 int i;
3538 uint8_t voltage;
cdb0e95b 3539 int voltage_tries, loop_tries;
ea5b213a 3540 uint32_t DP = intel_dp->DP;
6aba5b6c 3541 uint8_t link_config[2];
a4fc5ed6 3542
affa9354 3543 if (HAS_DDI(dev))
c19b0669
PZ
3544 intel_ddi_prepare_link_retrain(encoder);
3545
3cf2efb1 3546 /* Write the link configuration data */
6aba5b6c
JN
3547 link_config[0] = intel_dp->link_bw;
3548 link_config[1] = intel_dp->lane_count;
3549 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3550 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3551 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3552 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3553 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3554 &intel_dp->rate_select, 1);
6aba5b6c
JN
3555
3556 link_config[0] = 0;
3557 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3558 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3559
3560 DP |= DP_PORT_EN;
1a2eb460 3561
70aff66c
JN
3562 /* clock recovery */
3563 if (!intel_dp_reset_link_train(intel_dp, &DP,
3564 DP_TRAINING_PATTERN_1 |
3565 DP_LINK_SCRAMBLING_DISABLE)) {
3566 DRM_ERROR("failed to enable link training\n");
3567 return;
3568 }
3569
a4fc5ed6 3570 voltage = 0xff;
cdb0e95b
KP
3571 voltage_tries = 0;
3572 loop_tries = 0;
a4fc5ed6 3573 for (;;) {
70aff66c 3574 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3575
a7c9655f 3576 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3577 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3578 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3579 break;
93f62dad 3580 }
a4fc5ed6 3581
01916270 3582 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3583 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3584 break;
3585 }
3586
3587 /* Check to see if we've tried the max voltage */
3588 for (i = 0; i < intel_dp->lane_count; i++)
3589 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3590 break;
3b4f819d 3591 if (i == intel_dp->lane_count) {
b06fbda3
DV
3592 ++loop_tries;
3593 if (loop_tries == 5) {
3def84b3 3594 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3595 break;
3596 }
70aff66c
JN
3597 intel_dp_reset_link_train(intel_dp, &DP,
3598 DP_TRAINING_PATTERN_1 |
3599 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3600 voltage_tries = 0;
3601 continue;
3602 }
a4fc5ed6 3603
3cf2efb1 3604 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3605 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3606 ++voltage_tries;
b06fbda3 3607 if (voltage_tries == 5) {
3def84b3 3608 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3609 break;
3610 }
3611 } else
3612 voltage_tries = 0;
3613 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3614
70aff66c
JN
3615 /* Update training set as requested by target */
3616 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3617 DRM_ERROR("failed to update link training\n");
3618 break;
3619 }
a4fc5ed6
KP
3620 }
3621
33a34e4e
JB
3622 intel_dp->DP = DP;
3623}
3624
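
A standalone sketch of the two-byte link configuration written at the top of intel_dp_start_link_train() above (DPCD 0x100/0x101). The register offsets and the enhanced-framing bit are the standard drm_dp_helper.h values; the helper name and layout are illustrative only.

#include <stdint.h>

#define DP_LINK_BW_SET			0x100
#define DP_LANE_COUNT_SET		0x101
#define DP_LANE_COUNT_ENHANCED_FRAME_EN	(1 << 7)

/* Build the payload for the DP_LINK_BW_SET..DP_LANE_COUNT_SET write. */
static void build_link_config(uint8_t link_bw, uint8_t lane_count,
			      int enhanced_framing, uint8_t out[2])
{
	out[0] = link_bw;	/* e.g. 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps */
	out[1] = lane_count;
	if (enhanced_framing)
		out[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
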
c19b0669 3625void
33a34e4e
JB
3626intel_dp_complete_link_train(struct intel_dp *intel_dp)
3627{
33a34e4e 3628 bool channel_eq = false;
37f80975 3629 int tries, cr_tries;
33a34e4e 3630 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3631 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3632
3633 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3634 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3635 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3636
a4fc5ed6 3637 /* channel equalization */
70aff66c 3638 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3639 training_pattern |
70aff66c
JN
3640 DP_LINK_SCRAMBLING_DISABLE)) {
3641 DRM_ERROR("failed to start channel equalization\n");
3642 return;
3643 }
3644
a4fc5ed6 3645 tries = 0;
37f80975 3646 cr_tries = 0;
a4fc5ed6
KP
3647 channel_eq = false;
3648 for (;;) {
70aff66c 3649 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3650
37f80975
JB
3651 if (cr_tries > 5) {
3652 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3653 break;
3654 }
3655
a7c9655f 3656 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3657 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3658 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3659 break;
70aff66c 3660 }
a4fc5ed6 3661
37f80975 3662 /* Make sure clock is still ok */
01916270 3663 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3664 intel_dp_start_link_train(intel_dp);
70aff66c 3665 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3666 training_pattern |
70aff66c 3667 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3668 cr_tries++;
3669 continue;
3670 }
3671
1ffdff13 3672 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3673 channel_eq = true;
3674 break;
3675 }
a4fc5ed6 3676
37f80975
JB
3677 /* Try 5 times, then try clock recovery if that fails */
3678 if (tries > 5) {
37f80975 3679 intel_dp_start_link_train(intel_dp);
70aff66c 3680 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3681 training_pattern |
70aff66c 3682 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3683 tries = 0;
3684 cr_tries++;
3685 continue;
3686 }
a4fc5ed6 3687
70aff66c
JN
3688 /* Update training set as requested by target */
3689 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3690 DRM_ERROR("failed to update link training\n");
3691 break;
3692 }
3cf2efb1 3693 ++tries;
869184a6 3694 }
3cf2efb1 3695
3ab9c637
ID
3696 intel_dp_set_idle_link_train(intel_dp);
3697
3698 intel_dp->DP = DP;
3699
d6c0d722 3700 if (channel_eq)
07f42258 3701 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3702
3ab9c637
ID
3703}
3704
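
A minimal sketch of the training-pattern choice made at the top of intel_dp_complete_link_train() above. In the driver the TPS3 decision folds in platform support via intel_dp->use_tps3; here it is reduced to the two inputs visible in this function, purely for illustration.

#define DP_LINK_BW_5_4		0x14
#define DP_TRAINING_PATTERN_2	2
#define DP_TRAINING_PATTERN_3	3

/* HBR2 requires TPS3; otherwise use it only when source and sink allow it. */
static int pick_eq_training_pattern(int link_bw, int use_tps3)
{
	if (link_bw == DP_LINK_BW_5_4 || use_tps3)
		return DP_TRAINING_PATTERN_3;
	return DP_TRAINING_PATTERN_2;
}
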
3705void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3706{
70aff66c 3707 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3708 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3709}
3710
3711static void
ea5b213a 3712intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3713{
da63a9f2 3714 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3715 enum port port = intel_dig_port->port;
da63a9f2 3716 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3717 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3718 uint32_t DP = intel_dp->DP;
a4fc5ed6 3719
bc76e320 3720 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3721 return;
3722
0c33d8d7 3723 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3724 return;
3725
28c97730 3726 DRM_DEBUG_KMS("\n");
32f9d658 3727
bc7d38a4 3728 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3729 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3730 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3731 } else {
aad3d14d
VS
3732 if (IS_CHERRYVIEW(dev))
3733 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3734 else
3735 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3736 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3737 }
fe255d00 3738 POSTING_READ(intel_dp->output_reg);
5eb08b69 3739
493a7081 3740 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3741 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3742 /* Hardware workaround: leaving our transcoder select
3743 * set to transcoder B while it's off will prevent the
3744 * corresponding HDMI output on transcoder A.
3745 *
3746 * Combine this with another hardware workaround:
3747 * transcoder select bit can only be cleared while the
3748 * port is enabled.
3749 */
3750 DP &= ~DP_PIPEB_SELECT;
3751 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3752 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3753 }
3754
832afda6 3755 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3756 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3757 POSTING_READ(intel_dp->output_reg);
f01eca2e 3758 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3759}
3760
26d61aad
KP
3761static bool
3762intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3763{
a031d709
RV
3764 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3765 struct drm_device *dev = dig_port->base.base.dev;
3766 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3767 uint8_t rev;
a031d709 3768
9d1a1031
JN
3769 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3770 sizeof(intel_dp->dpcd)) < 0)
edb39244 3771 return false; /* aux transfer failed */
92fd8fd1 3772
a8e98153 3773 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3774
edb39244
AJ
3775 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3776 return false; /* DPCD not present */
3777
2293bb5c
SK
3778 /* Check if the panel supports PSR */
3779 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3780 if (is_edp(intel_dp)) {
9d1a1031
JN
3781 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3782 intel_dp->psr_dpcd,
3783 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3784 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3785 dev_priv->psr.sink_support = true;
50003939 3786 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3787 }
474d1ec4
SJ
3788
3789 if (INTEL_INFO(dev)->gen >= 9 &&
3790 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3791 uint8_t frame_sync_cap;
3792
3793 dev_priv->psr.sink_support = true;
3794 intel_dp_dpcd_read_wake(&intel_dp->aux,
3795 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3796 &frame_sync_cap, 1);
3797 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3798 /* PSR2 needs frame sync as well */
3799 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3800 DRM_DEBUG_KMS("PSR2 %s on sink",
3801 dev_priv->psr.psr2_support ? "supported" : "not supported");
3802 }
50003939
JN
3803 }
3804
7809a611 3805 /* Training Pattern 3 support, both source and sink */
06ea66b6 3806 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3807 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3808 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3809 intel_dp->use_tps3 = true;
f8d8a672 3810 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3811 } else
3812 intel_dp->use_tps3 = false;
3813
fc0f8e25
SJ
3814 /* Intermediate frequency support */
3815 if (is_edp(intel_dp) &&
3816 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3817 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3818 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3819 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3820 int i;
3821
fc0f8e25
SJ
3822 intel_dp_dpcd_read_wake(&intel_dp->aux,
3823 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3824 sink_rates,
3825 sizeof(sink_rates));
ea2d8a42 3826
94ca719e
VS
3827 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3828 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3829
3830 if (val == 0)
3831 break;
3832
94ca719e 3833 intel_dp->sink_rates[i] = val * 200;
ea2d8a42 3834 }
94ca719e 3835 intel_dp->num_sink_rates = i;
fc0f8e25 3836 }
0336400e
VS
3837
3838 intel_dp_print_rates(intel_dp);
3839
edb39244
AJ
3840 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3841 DP_DWN_STRM_PORT_PRESENT))
3842 return true; /* native DP sink */
3843
3844 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3845 return true; /* no per-port downstream info */
3846
9d1a1031
JN
3847 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3848 intel_dp->downstream_ports,
3849 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3850 return false; /* downstream port status fetch failed */
3851
3852 return true;
92fd8fd1
KP
3853}
3854
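
A userspace-flavoured sketch of the eDP 1.4 rate-table parsing done above: DP_SUPPORTED_LINK_RATES holds little-endian 16-bit entries in units of 200 kHz, terminated by a zero entry. le16toh() from <endian.h> stands in for the kernel's le16_to_cpu(); the helper itself is illustrative, not the driver's code.

#include <endian.h>
#include <stddef.h>
#include <stdint.h>

/* Convert a raw DP_SUPPORTED_LINK_RATES table to link rates in kHz. */
static int parse_sink_rates(const uint16_t *raw_le, size_t max_entries,
			    int *rates_khz)
{
	size_t i;

	for (i = 0; i < max_entries; i++) {
		uint16_t val = le16toh(raw_le[i]);

		if (val == 0)		/* a zero entry terminates the table */
			break;
		rates_khz[i] = val * 200;	/* entries are in 200 kHz units */
	}

	return (int)i;	/* number of valid rates found */
}
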
0d198328
AJ
3855static void
3856intel_dp_probe_oui(struct intel_dp *intel_dp)
3857{
3858 u8 buf[3];
3859
3860 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3861 return;
3862
9d1a1031 3863 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3864 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3865 buf[0], buf[1], buf[2]);
3866
9d1a1031 3867 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3868 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3869 buf[0], buf[1], buf[2]);
3870}
3871
0e32b39c
DA
3872static bool
3873intel_dp_probe_mst(struct intel_dp *intel_dp)
3874{
3875 u8 buf[1];
3876
3877 if (!intel_dp->can_mst)
3878 return false;
3879
3880 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3881 return false;
3882
0e32b39c
DA
3883 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3884 if (buf[0] & DP_MST_CAP) {
3885 DRM_DEBUG_KMS("Sink is MST capable\n");
3886 intel_dp->is_mst = true;
3887 } else {
3888 DRM_DEBUG_KMS("Sink is not MST capable\n");
3889 intel_dp->is_mst = false;
3890 }
3891 }
0e32b39c
DA
3892
3893 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3894 return intel_dp->is_mst;
3895}
3896
d2e216d0
RV
3897int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3898{
3899 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3900 struct drm_device *dev = intel_dig_port->base.base.dev;
3901 struct intel_crtc *intel_crtc =
3902 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3903 u8 buf;
3904 int test_crc_count;
3905 int attempts = 6;
d2e216d0 3906
ad9dc91b 3907 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3908 return -EIO;
d2e216d0 3909
ad9dc91b 3910 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3911 return -ENOTTY;
3912
1dda5f93
RV
3913 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3914 return -EIO;
3915
9d1a1031 3916 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3917 buf | DP_TEST_SINK_START) < 0)
bda0381e 3918 return -EIO;
d2e216d0 3919
1dda5f93 3920 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3921 return -EIO;
ad9dc91b 3922 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3923
ad9dc91b 3924 do {
1dda5f93
RV
3925 if (drm_dp_dpcd_readb(&intel_dp->aux,
3926 DP_TEST_SINK_MISC, &buf) < 0)
3927 return -EIO;
ad9dc91b
RV
3928 intel_wait_for_vblank(dev, intel_crtc->pipe);
3929 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3930
3931 if (attempts == 0) {
90bd1f46
DV
3932 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3933 return -ETIMEDOUT;
ad9dc91b 3934 }
d2e216d0 3935
9d1a1031 3936 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3937 return -EIO;
d2e216d0 3938
1dda5f93
RV
3939 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3940 return -EIO;
3941 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3942 buf & ~DP_TEST_SINK_START) < 0)
3943 return -EIO;
ce31d9f4 3944
d2e216d0
RV
3945 return 0;
3946}
3947
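
A minimal sketch of the bounded wait used above, where the sink gets up to six vblanks to produce a fresh CRC before the driver gives up. The callbacks are illustrative stand-ins for the DPCD TEST_SINK_MISC read and the vblank wait.

#include <stdbool.h>

/*
 * Wait one vblank between reads and report whether the test counter
 * changed from its starting value within 'attempts' tries.
 */
static bool poll_for_crc_count_change(int (*read_count)(void *priv),
				      void (*wait_vblank)(void *priv),
				      void *priv, int start, int attempts)
{
	while (attempts--) {
		wait_vblank(priv);
		if (read_count(priv) != start)
			return true;
	}

	return false;
}
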
a60f0e38
JB
3948static bool
3949intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3950{
9d1a1031
JN
3951 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3952 DP_DEVICE_SERVICE_IRQ_VECTOR,
3953 sink_irq_vector, 1) == 1;
a60f0e38
JB
3954}
3955
0e32b39c
DA
3956static bool
3957intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3958{
3959 int ret;
3960
3961 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3962 DP_SINK_COUNT_ESI,
3963 sink_irq_vector, 14);
3964 if (ret != 14)
3965 return false;
3966
3967 return true;
3968}
3969
a60f0e38
JB
3970static void
3971intel_dp_handle_test_request(struct intel_dp *intel_dp)
3972{
3973 /* NAK by default */
9d1a1031 3974 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3975}
3976
0e32b39c
DA
3977static int
3978intel_dp_check_mst_status(struct intel_dp *intel_dp)
3979{
3980 bool bret;
3981
3982 if (intel_dp->is_mst) {
3983 u8 esi[16] = { 0 };
3984 int ret = 0;
3985 int retry;
3986 bool handled;
3987 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3988go_again:
3989 if (bret == true) {
3990
3991 /* check link status - esi[10] = 0x200c */
3992 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3993 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3994 intel_dp_start_link_train(intel_dp);
3995 intel_dp_complete_link_train(intel_dp);
3996 intel_dp_stop_link_train(intel_dp);
3997 }
3998
6f34cc39 3999 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4000 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4001
4002 if (handled) {
4003 for (retry = 0; retry < 3; retry++) {
4004 int wret;
4005 wret = drm_dp_dpcd_write(&intel_dp->aux,
4006 DP_SINK_COUNT_ESI+1,
4007 &esi[1], 3);
4008 if (wret == 3) {
4009 break;
4010 }
4011 }
4012
4013 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4014 if (bret == true) {
6f34cc39 4015 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4016 goto go_again;
4017 }
4018 } else
4019 ret = 0;
4020
4021 return ret;
4022 } else {
4023 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4024 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4025 intel_dp->is_mst = false;
4026 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4027 /* send a hotplug event */
4028 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4029 }
4030 }
4031 return -EINVAL;
4032}
4033
a4fc5ed6
KP
4034/*
4035 * According to DP spec
4036 * 5.1.2:
4037 * 1. Read DPCD
4038 * 2. Configure link according to Receiver Capabilities
4039 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4040 * 4. Check link status on receipt of hot-plug interrupt
4041 */
a5146200 4042static void
ea5b213a 4043intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4044{
5b215bcf 4045 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4046 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4047 u8 sink_irq_vector;
93f62dad 4048 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4049
5b215bcf
DA
4050 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4051
da63a9f2 4052 if (!intel_encoder->connectors_active)
d2b996ac 4053 return;
59cd09e1 4054
da63a9f2 4055 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4056 return;
4057
1a125d8a
ID
4058 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4059 return;
4060
92fd8fd1 4061 /* Try to read receiver status if the link appears to be up */
93f62dad 4062 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4063 return;
4064 }
4065
92fd8fd1 4066 /* Now read the DPCD to see if it's actually running */
26d61aad 4067 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4068 return;
4069 }
4070
a60f0e38
JB
4071 /* Try to read the source of the interrupt */
4072 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4073 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4074 /* Clear interrupt source */
9d1a1031
JN
4075 drm_dp_dpcd_writeb(&intel_dp->aux,
4076 DP_DEVICE_SERVICE_IRQ_VECTOR,
4077 sink_irq_vector);
a60f0e38
JB
4078
4079 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4080 intel_dp_handle_test_request(intel_dp);
4081 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4082 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4083 }
4084
1ffdff13 4085 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4086 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4087 intel_encoder->base.name);
33a34e4e
JB
4088 intel_dp_start_link_train(intel_dp);
4089 intel_dp_complete_link_train(intel_dp);
3ab9c637 4090 intel_dp_stop_link_train(intel_dp);
33a34e4e 4091 }
a4fc5ed6 4092}
a4fc5ed6 4093
caf9ab24 4094/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4095static enum drm_connector_status
26d61aad 4096intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4097{
caf9ab24 4098 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4099 uint8_t type;
4100
4101 if (!intel_dp_get_dpcd(intel_dp))
4102 return connector_status_disconnected;
4103
4104 /* if there's no downstream port, we're done */
4105 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4106 return connector_status_connected;
caf9ab24
AJ
4107
4108 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4109 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4110 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4111 uint8_t reg;
9d1a1031
JN
4112
4113 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4114 &reg, 1) < 0)
caf9ab24 4115 return connector_status_unknown;
9d1a1031 4116
23235177
AJ
4117 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4118 : connector_status_disconnected;
caf9ab24
AJ
4119 }
4120
4121 /* If no HPD, poke DDC gently */
0b99836f 4122 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4123 return connector_status_connected;
caf9ab24
AJ
4124
4125 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4126 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4127 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4128 if (type == DP_DS_PORT_TYPE_VGA ||
4129 type == DP_DS_PORT_TYPE_NON_EDID)
4130 return connector_status_unknown;
4131 } else {
4132 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4133 DP_DWN_STRM_PORT_TYPE_MASK;
4134 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4135 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4136 return connector_status_unknown;
4137 }
caf9ab24
AJ
4138
4139 /* Anything else is out of spec, warn and ignore */
4140 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4141 return connector_status_disconnected;
71ba9000
AJ
4142}
4143
d410b56d
CW
4144static enum drm_connector_status
4145edp_detect(struct intel_dp *intel_dp)
4146{
4147 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4148 enum drm_connector_status status;
4149
4150 status = intel_panel_detect(dev);
4151 if (status == connector_status_unknown)
4152 status = connector_status_connected;
4153
4154 return status;
4155}
4156
5eb08b69 4157static enum drm_connector_status
a9756bb5 4158ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4159{
30add22d 4160 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4161 struct drm_i915_private *dev_priv = dev->dev_private;
4162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4163
1b469639
DL
4164 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4165 return connector_status_disconnected;
4166
26d61aad 4167 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4168}
4169
2a592bec
DA
4170static int g4x_digital_port_connected(struct drm_device *dev,
4171 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4172{
a4fc5ed6 4173 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4174 uint32_t bit;
5eb08b69 4175
232a6ee9
TP
4176 if (IS_VALLEYVIEW(dev)) {
4177 switch (intel_dig_port->port) {
4178 case PORT_B:
4179 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4180 break;
4181 case PORT_C:
4182 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4183 break;
4184 case PORT_D:
4185 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4186 break;
4187 default:
2a592bec 4188 return -EINVAL;
232a6ee9
TP
4189 }
4190 } else {
4191 switch (intel_dig_port->port) {
4192 case PORT_B:
4193 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4194 break;
4195 case PORT_C:
4196 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4197 break;
4198 case PORT_D:
4199 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4200 break;
4201 default:
2a592bec 4202 return -EINVAL;
232a6ee9 4203 }
a4fc5ed6
KP
4204 }
4205
10f76a38 4206 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4207 return 0;
4208 return 1;
4209}
4210
4211static enum drm_connector_status
4212g4x_dp_detect(struct intel_dp *intel_dp)
4213{
4214 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4215 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4216 int ret;
4217
4218 /* Can't disconnect eDP, but you can close the lid... */
4219 if (is_edp(intel_dp)) {
4220 enum drm_connector_status status;
4221
4222 status = intel_panel_detect(dev);
4223 if (status == connector_status_unknown)
4224 status = connector_status_connected;
4225 return status;
4226 }
4227
4228 ret = g4x_digital_port_connected(dev, intel_dig_port);
4229 if (ret == -EINVAL)
4230 return connector_status_unknown;
4231 else if (ret == 0)
a4fc5ed6
KP
4232 return connector_status_disconnected;
4233
26d61aad 4234 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4235}
4236
8c241fef 4237static struct edid *
beb60608 4238intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4239{
beb60608 4240 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4241
9cd300e0
JN
4242 /* use cached edid if we have one */
4243 if (intel_connector->edid) {
9cd300e0
JN
4244 /* invalid edid */
4245 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4246 return NULL;
4247
55e9edeb 4248 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4249 } else
4250 return drm_get_edid(&intel_connector->base,
4251 &intel_dp->aux.ddc);
4252}
8c241fef 4253
beb60608
CW
4254static void
4255intel_dp_set_edid(struct intel_dp *intel_dp)
4256{
4257 struct intel_connector *intel_connector = intel_dp->attached_connector;
4258 struct edid *edid;
8c241fef 4259
beb60608
CW
4260 edid = intel_dp_get_edid(intel_dp);
4261 intel_connector->detect_edid = edid;
4262
4263 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4264 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4265 else
4266 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4267}
4268
beb60608
CW
4269static void
4270intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4271{
beb60608 4272 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4273
beb60608
CW
4274 kfree(intel_connector->detect_edid);
4275 intel_connector->detect_edid = NULL;
9cd300e0 4276
beb60608
CW
4277 intel_dp->has_audio = false;
4278}
d6f24d0f 4279
beb60608
CW
4280static enum intel_display_power_domain
4281intel_dp_power_get(struct intel_dp *dp)
4282{
4283 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4284 enum intel_display_power_domain power_domain;
4285
4286 power_domain = intel_display_port_power_domain(encoder);
4287 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4288
4289 return power_domain;
4290}
d6f24d0f 4291
beb60608
CW
4292static void
4293intel_dp_power_put(struct intel_dp *dp,
4294 enum intel_display_power_domain power_domain)
4295{
4296 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4297 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4298}
4299
a9756bb5
ZW
4300static enum drm_connector_status
4301intel_dp_detect(struct drm_connector *connector, bool force)
4302{
4303 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4304 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4305 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4306 struct drm_device *dev = connector->dev;
a9756bb5 4307 enum drm_connector_status status;
671dedd2 4308 enum intel_display_power_domain power_domain;
0e32b39c 4309 bool ret;
a9756bb5 4310
164c8598 4311 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4312 connector->base.id, connector->name);
beb60608 4313 intel_dp_unset_edid(intel_dp);
164c8598 4314
0e32b39c
DA
4315 if (intel_dp->is_mst) {
4316 /* MST devices are disconnected from a monitor POV */
4317 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4318 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4319 return connector_status_disconnected;
0e32b39c
DA
4320 }
4321
beb60608 4322 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4323
d410b56d
CW
4324 /* Can't disconnect eDP, but you can close the lid... */
4325 if (is_edp(intel_dp))
4326 status = edp_detect(intel_dp);
4327 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4328 status = ironlake_dp_detect(intel_dp);
4329 else
4330 status = g4x_dp_detect(intel_dp);
4331 if (status != connector_status_connected)
c8c8fb33 4332 goto out;
a9756bb5 4333
0d198328
AJ
4334 intel_dp_probe_oui(intel_dp);
4335
0e32b39c
DA
4336 ret = intel_dp_probe_mst(intel_dp);
4337 if (ret) {
4338 /* if we are in MST mode then this connector
4339 won't appear connected or have anything with EDID on it */
4340 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4341 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4342 status = connector_status_disconnected;
4343 goto out;
4344 }
4345
beb60608 4346 intel_dp_set_edid(intel_dp);
a9756bb5 4347
d63885da
PZ
4348 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4349 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4350 status = connector_status_connected;
4351
4352out:
beb60608 4353 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4354 return status;
a4fc5ed6
KP
4355}
4356
beb60608
CW
4357static void
4358intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4359{
df0e9248 4360 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4361 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4362 enum intel_display_power_domain power_domain;
a4fc5ed6 4363
beb60608
CW
4364 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4365 connector->base.id, connector->name);
4366 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4367
beb60608
CW
4368 if (connector->status != connector_status_connected)
4369 return;
671dedd2 4370
beb60608
CW
4371 power_domain = intel_dp_power_get(intel_dp);
4372
4373 intel_dp_set_edid(intel_dp);
4374
4375 intel_dp_power_put(intel_dp, power_domain);
4376
4377 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4378 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4379}
4380
4381static int intel_dp_get_modes(struct drm_connector *connector)
4382{
4383 struct intel_connector *intel_connector = to_intel_connector(connector);
4384 struct edid *edid;
4385
4386 edid = intel_connector->detect_edid;
4387 if (edid) {
4388 int ret = intel_connector_update_modes(connector, edid);
4389 if (ret)
4390 return ret;
4391 }
32f9d658 4392
f8779fda 4393 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4394 if (is_edp(intel_attached_dp(connector)) &&
4395 intel_connector->panel.fixed_mode) {
f8779fda 4396 struct drm_display_mode *mode;
beb60608
CW
4397
4398 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4399 intel_connector->panel.fixed_mode);
f8779fda 4400 if (mode) {
32f9d658
ZW
4401 drm_mode_probed_add(connector, mode);
4402 return 1;
4403 }
4404 }
beb60608 4405
32f9d658 4406 return 0;
a4fc5ed6
KP
4407}
4408
1aad7ac0
CW
4409static bool
4410intel_dp_detect_audio(struct drm_connector *connector)
4411{
1aad7ac0 4412 bool has_audio = false;
beb60608 4413 struct edid *edid;
1aad7ac0 4414
beb60608
CW
4415 edid = to_intel_connector(connector)->detect_edid;
4416 if (edid)
1aad7ac0 4417 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4418
1aad7ac0
CW
4419 return has_audio;
4420}
4421
f684960e
CW
4422static int
4423intel_dp_set_property(struct drm_connector *connector,
4424 struct drm_property *property,
4425 uint64_t val)
4426{
e953fd7b 4427 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4428 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4429 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4430 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4431 int ret;
4432
662595df 4433 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4434 if (ret)
4435 return ret;
4436
3f43c48d 4437 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4438 int i = val;
4439 bool has_audio;
4440
4441 if (i == intel_dp->force_audio)
f684960e
CW
4442 return 0;
4443
1aad7ac0 4444 intel_dp->force_audio = i;
f684960e 4445
c3e5f67b 4446 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4447 has_audio = intel_dp_detect_audio(connector);
4448 else
c3e5f67b 4449 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4450
4451 if (has_audio == intel_dp->has_audio)
f684960e
CW
4452 return 0;
4453
1aad7ac0 4454 intel_dp->has_audio = has_audio;
f684960e
CW
4455 goto done;
4456 }
4457
e953fd7b 4458 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4459 bool old_auto = intel_dp->color_range_auto;
4460 uint32_t old_range = intel_dp->color_range;
4461
55bc60db
VS
4462 switch (val) {
4463 case INTEL_BROADCAST_RGB_AUTO:
4464 intel_dp->color_range_auto = true;
4465 break;
4466 case INTEL_BROADCAST_RGB_FULL:
4467 intel_dp->color_range_auto = false;
4468 intel_dp->color_range = 0;
4469 break;
4470 case INTEL_BROADCAST_RGB_LIMITED:
4471 intel_dp->color_range_auto = false;
4472 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4473 break;
4474 default:
4475 return -EINVAL;
4476 }
ae4edb80
DV
4477
4478 if (old_auto == intel_dp->color_range_auto &&
4479 old_range == intel_dp->color_range)
4480 return 0;
4481
e953fd7b
CW
4482 goto done;
4483 }
4484
53b41837
YN
4485 if (is_edp(intel_dp) &&
4486 property == connector->dev->mode_config.scaling_mode_property) {
4487 if (val == DRM_MODE_SCALE_NONE) {
4488 DRM_DEBUG_KMS("no scaling not supported\n");
4489 return -EINVAL;
4490 }
4491
4492 if (intel_connector->panel.fitting_mode == val) {
4493 /* the eDP scaling property is not changed */
4494 return 0;
4495 }
4496 intel_connector->panel.fitting_mode = val;
4497
4498 goto done;
4499 }
4500
f684960e
CW
4501 return -EINVAL;
4502
4503done:
c0c36b94
CW
4504 if (intel_encoder->base.crtc)
4505 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4506
4507 return 0;
4508}
4509
a4fc5ed6 4510static void
73845adf 4511intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4512{
1d508706 4513 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4514
10e972d3 4515 kfree(intel_connector->detect_edid);
beb60608 4516
9cd300e0
JN
4517 if (!IS_ERR_OR_NULL(intel_connector->edid))
4518 kfree(intel_connector->edid);
4519
acd8db10
PZ
4520 /* Can't call is_edp() since the encoder may have been destroyed
4521 * already. */
4522 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4523 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4524
a4fc5ed6 4525 drm_connector_cleanup(connector);
55f78c43 4526 kfree(connector);
a4fc5ed6
KP
4527}
4528
00c09d70 4529void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4530{
da63a9f2
PZ
4531 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4532 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4533
4f71d0cb 4534 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4535 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4536 if (is_edp(intel_dp)) {
4537 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4538 /*
4539 * vdd might still be enabled due to the delayed vdd off.
4540 * Make sure vdd is actually turned off here.
4541 */
773538e8 4542 pps_lock(intel_dp);
4be73780 4543 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4544 pps_unlock(intel_dp);
4545
01527b31
CT
4546 if (intel_dp->edp_notifier.notifier_call) {
4547 unregister_reboot_notifier(&intel_dp->edp_notifier);
4548 intel_dp->edp_notifier.notifier_call = NULL;
4549 }
bd943159 4550 }
c8bd0e49 4551 drm_encoder_cleanup(encoder);
da63a9f2 4552 kfree(intel_dig_port);
24d05927
DV
4553}
4554
07f9cd0b
ID
4555static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4556{
4557 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4558
4559 if (!is_edp(intel_dp))
4560 return;
4561
951468f3
VS
4562 /*
4563 * vdd might still be enabled do to the delayed vdd off.
4564 * Make sure vdd is actually turned off here.
4565 */
afa4e53a 4566 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4567 pps_lock(intel_dp);
07f9cd0b 4568 edp_panel_vdd_off_sync(intel_dp);
773538e8 4569 pps_unlock(intel_dp);
07f9cd0b
ID
4570}
4571
49e6bc51
VS
4572static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4573{
4574 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4575 struct drm_device *dev = intel_dig_port->base.base.dev;
4576 struct drm_i915_private *dev_priv = dev->dev_private;
4577 enum intel_display_power_domain power_domain;
4578
4579 lockdep_assert_held(&dev_priv->pps_mutex);
4580
4581 if (!edp_have_panel_vdd(intel_dp))
4582 return;
4583
4584 /*
4585 * The VDD bit needs a power domain reference, so if the bit is
4586 * already enabled when we boot or resume, grab this reference and
4587 * schedule a vdd off, so we don't hold on to the reference
4588 * indefinitely.
4589 */
4590 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4591 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4592 intel_display_power_get(dev_priv, power_domain);
4593
4594 edp_panel_vdd_schedule_off(intel_dp);
4595}
4596
6d93c0c4
ID
4597static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4598{
49e6bc51
VS
4599 struct intel_dp *intel_dp;
4600
4601 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4602 return;
4603
4604 intel_dp = enc_to_intel_dp(encoder);
4605
4606 pps_lock(intel_dp);
4607
4608 /*
4609 * Read out the current power sequencer assignment,
4610 * in case the BIOS did something with it.
4611 */
4612 if (IS_VALLEYVIEW(encoder->dev))
4613 vlv_initial_power_sequencer_setup(intel_dp);
4614
4615 intel_edp_panel_vdd_sanitize(intel_dp);
4616
4617 pps_unlock(intel_dp);
6d93c0c4
ID
4618}
4619
a4fc5ed6 4620static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4621 .dpms = intel_connector_dpms,
a4fc5ed6 4622 .detect = intel_dp_detect,
beb60608 4623 .force = intel_dp_force,
a4fc5ed6 4624 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4625 .set_property = intel_dp_set_property,
2545e4a6 4626 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4627 .destroy = intel_dp_connector_destroy,
c6f95f27 4628 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4629 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4630};
4631
4632static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4633 .get_modes = intel_dp_get_modes,
4634 .mode_valid = intel_dp_mode_valid,
df0e9248 4635 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4636};
4637
a4fc5ed6 4638static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4639 .reset = intel_dp_encoder_reset,
24d05927 4640 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4641};
4642
0e32b39c 4643void
21d40d37 4644intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4645{
0e32b39c 4646 return;
c8110e52 4647}
6207937d 4648
b2c5c181 4649enum irqreturn
13cf5504
DA
4650intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4651{
4652 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4653 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4654 struct drm_device *dev = intel_dig_port->base.base.dev;
4655 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4656 enum intel_display_power_domain power_domain;
b2c5c181 4657 enum irqreturn ret = IRQ_NONE;
1c767b33 4658
0e32b39c
DA
4659 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4660 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4661
7a7f84cc
VS
4662 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4663 /*
4664 * vdd off can generate a long pulse on eDP which
4665 * would require vdd on to handle it, and thus we
4666 * would end up in an endless cycle of
4667 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4668 */
4669 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4670 port_name(intel_dig_port->port));
a8b3d52f 4671 return IRQ_HANDLED;
7a7f84cc
VS
4672 }
4673
26fbb774
VS
4674 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4675 port_name(intel_dig_port->port),
0e32b39c 4676 long_hpd ? "long" : "short");
13cf5504 4677
1c767b33
ID
4678 power_domain = intel_display_port_power_domain(intel_encoder);
4679 intel_display_power_get(dev_priv, power_domain);
4680
0e32b39c 4681 if (long_hpd) {
2a592bec
DA
4682
4683 if (HAS_PCH_SPLIT(dev)) {
4684 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4685 goto mst_fail;
4686 } else {
4687 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4688 goto mst_fail;
4689 }
0e32b39c
DA
4690
4691 if (!intel_dp_get_dpcd(intel_dp)) {
4692 goto mst_fail;
4693 }
4694
4695 intel_dp_probe_oui(intel_dp);
4696
4697 if (!intel_dp_probe_mst(intel_dp))
4698 goto mst_fail;
4699
4700 } else {
4701 if (intel_dp->is_mst) {
1c767b33 4702 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4703 goto mst_fail;
4704 }
4705
4706 if (!intel_dp->is_mst) {
4707 /*
4708 * we'll check the link status via the normal hot plug path later -
4709 * but for short hpds we should check it now
4710 */
5b215bcf 4711 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4712 intel_dp_check_link_status(intel_dp);
5b215bcf 4713 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4714 }
4715 }
b2c5c181
DV
4716
4717 ret = IRQ_HANDLED;
4718
1c767b33 4719 goto put_power;
0e32b39c
DA
4720mst_fail:
4721 /* if we were in MST mode and the device is not there, get out of MST mode */
4722 if (intel_dp->is_mst) {
4723 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4724 intel_dp->is_mst = false;
4725 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4726 }
1c767b33
ID
4727put_power:
4728 intel_display_power_put(dev_priv, power_domain);
4729
4730 return ret;
13cf5504
DA
4731}
4732
e3421a18
ZW
4733/* Return which DP Port should be selected for Transcoder DP control */
4734int
0206e353 4735intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4736{
4737 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4738 struct intel_encoder *intel_encoder;
4739 struct intel_dp *intel_dp;
e3421a18 4740
fa90ecef
PZ
4741 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4742 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4743
fa90ecef
PZ
4744 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4745 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4746 return intel_dp->output_reg;
e3421a18 4747 }
ea5b213a 4748
e3421a18
ZW
4749 return -1;
4750}
4751
36e83a18 4752/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4753bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4754{
4755 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4756 union child_device_config *p_child;
36e83a18 4757 int i;
5d8a7752
VS
4758 static const short port_mapping[] = {
4759 [PORT_B] = PORT_IDPB,
4760 [PORT_C] = PORT_IDPC,
4761 [PORT_D] = PORT_IDPD,
4762 };
36e83a18 4763
3b32a35b
VS
4764 if (port == PORT_A)
4765 return true;
4766
41aa3448 4767 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4768 return false;
4769
41aa3448
RV
4770 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4771 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4772
5d8a7752 4773 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4774 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4775 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4776 return true;
4777 }
4778 return false;
4779}
4780
0e32b39c 4781void
f684960e
CW
4782intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4783{
53b41837
YN
4784 struct intel_connector *intel_connector = to_intel_connector(connector);
4785
3f43c48d 4786 intel_attach_force_audio_property(connector);
e953fd7b 4787 intel_attach_broadcast_rgb_property(connector);
55bc60db 4788 intel_dp->color_range_auto = true;
53b41837
YN
4789
4790 if (is_edp(intel_dp)) {
4791 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4792 drm_object_attach_property(
4793 &connector->base,
53b41837 4794 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4795 DRM_MODE_SCALE_ASPECT);
4796 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4797 }
f684960e
CW
4798}
4799
dada1a9f
ID
4800static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4801{
4802 intel_dp->last_power_cycle = jiffies;
4803 intel_dp->last_power_on = jiffies;
4804 intel_dp->last_backlight_off = jiffies;
4805}
4806
67a54566
DV
4807static void
4808intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4809 struct intel_dp *intel_dp)
67a54566
DV
4810{
4811 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4812 struct edp_power_seq cur, vbt, spec,
4813 *final = &intel_dp->pps_delays;
67a54566 4814 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4815 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4816
e39b999a
VS
4817 lockdep_assert_held(&dev_priv->pps_mutex);
4818
81ddbc69
VS
4819 /* already initialized? */
4820 if (final->t11_t12 != 0)
4821 return;
4822
453c5420 4823 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4824 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4825 pp_on_reg = PCH_PP_ON_DELAYS;
4826 pp_off_reg = PCH_PP_OFF_DELAYS;
4827 pp_div_reg = PCH_PP_DIVISOR;
4828 } else {
bf13e81b
JN
4829 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4830
4831 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4832 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4833 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4834 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4835 }
67a54566
DV
4836
4837 /* Workaround: Need to write PP_CONTROL with the unlock key as
4838 * the very first thing. */
453c5420 4839 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4840 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4841
453c5420
JB
4842 pp_on = I915_READ(pp_on_reg);
4843 pp_off = I915_READ(pp_off_reg);
4844 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4845
4846 /* Pull timing values out of registers */
4847 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4848 PANEL_POWER_UP_DELAY_SHIFT;
4849
4850 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4851 PANEL_LIGHT_ON_DELAY_SHIFT;
4852
4853 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4854 PANEL_LIGHT_OFF_DELAY_SHIFT;
4855
4856 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4857 PANEL_POWER_DOWN_DELAY_SHIFT;
4858
4859 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4860 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4861
4862 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4863 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4864
41aa3448 4865 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4866
4867 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4868 * our hw here, which are all in 100usec. */
4869 spec.t1_t3 = 210 * 10;
4870 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4871 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4872 spec.t10 = 500 * 10;
4873 /* This one is special and actually in units of 100ms, but zero
4874 * based in the hw (so we need to add 100 ms). But the sw vbt
4875 * table multiplies it by 1000 to make it in units of 100usec,
4876 * too. */
4877 spec.t11_t12 = (510 + 100) * 10;
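	/* e.g. the 510 ms spec limit plus the 100 ms zero-based offset gives
	 * 610 ms, i.e. 6100 in the 100 usec units used for spec.* above. */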
4878
4879 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4880 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4881
4882 /* Use the max of the register settings and vbt. If both are
4883 * unset, fall back to the spec limits. */
36b5f425 4884#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4885 spec.field : \
4886 max(cur.field, vbt.field))
4887 assign_final(t1_t3);
4888 assign_final(t8);
4889 assign_final(t9);
4890 assign_final(t10);
4891 assign_final(t11_t12);
4892#undef assign_final
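	/*
	 * For example, if both the current register value and the VBT leave
	 * t1_t3 at 0, the spec limit of 2100 (210 ms) is used; otherwise the
	 * larger of the two values wins.
	 */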
4893
36b5f425 4894#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4895 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4896 intel_dp->backlight_on_delay = get_delay(t8);
4897 intel_dp->backlight_off_delay = get_delay(t9);
4898 intel_dp->panel_power_down_delay = get_delay(t10);
4899 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4900#undef get_delay
4901
f30d26e4
JN
4902 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4903 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4904 intel_dp->panel_power_cycle_delay);
4905
4906 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4907 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4908}
4909
4910static void
4911intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4912 struct intel_dp *intel_dp)
f30d26e4
JN
4913{
4914 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4915 u32 pp_on, pp_off, pp_div, port_sel = 0;
4916 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4917 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4918 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4919 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4920
e39b999a 4921 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4922
4923 if (HAS_PCH_SPLIT(dev)) {
4924 pp_on_reg = PCH_PP_ON_DELAYS;
4925 pp_off_reg = PCH_PP_OFF_DELAYS;
4926 pp_div_reg = PCH_PP_DIVISOR;
4927 } else {
bf13e81b
JN
4928 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4929
4930 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4931 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4932 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4933 }
4934
b2f19d1a
PZ
4935 /*
4936 * And finally store the new values in the power sequencer. The
4937 * backlight delays are set to 1 because we do manual waits on them. For
4938 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4939 * we'll end up waiting for the backlight off delay twice: once when we
4940 * do the manual sleep, and once when we disable the panel and wait for
4941 * the PP_STATUS bit to become zero.
4942 */
f30d26e4 4943 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4944 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4945 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4946 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4947 /* Compute the divisor for the pp clock, simply match the Bspec
4948 * formula. */
453c5420 4949 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4950 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4951 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4952
4953 /* Haswell doesn't have any port selection bits for the panel
4954 * power sequencer any more. */
bc7d38a4 4955 if (IS_VALLEYVIEW(dev)) {
ad933b56 4956 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4957 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4958 if (port == PORT_A)
a24c144c 4959 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4960 else
a24c144c 4961 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4962 }
4963
453c5420
JB
4964 pp_on |= port_sel;
4965
4966 I915_WRITE(pp_on_reg, pp_on);
4967 I915_WRITE(pp_off_reg, pp_off);
4968 I915_WRITE(pp_div_reg, pp_div);
67a54566 4969
67a54566 4970 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4971 I915_READ(pp_on_reg),
4972 I915_READ(pp_off_reg),
4973 I915_READ(pp_div_reg));
f684960e
CW
4974}
4975
b33a2815
VK
4976/**
4977 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4978 * @dev: DRM device
4979 * @refresh_rate: RR to be programmed
4980 *
4981 * This function gets called when refresh rate (RR) has to be changed from
4982 * one frequency to another. Switches can be between high and low RR
4983 * supported by the panel or to any other RR based on media playback (in
4984 * this case, RR value needs to be passed from user space).
4985 *
4986 * The caller of this function needs to hold dev_priv->drrs.mutex.
4987 */
96178eeb 4988static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4989{
4990 struct drm_i915_private *dev_priv = dev->dev_private;
4991 struct intel_encoder *encoder;
96178eeb
VK
4992 struct intel_digital_port *dig_port = NULL;
4993 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4994 struct intel_crtc_state *config = NULL;
439d7ac0 4995 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4996 u32 reg, val;
96178eeb 4997 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4998
4999 if (refresh_rate <= 0) {
5000 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5001 return;
5002 }
5003
96178eeb
VK
5004 if (intel_dp == NULL) {
5005 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5006 return;
5007 }
5008
1fcc9d1c 5009 /*
e4d59f6b
RV
5010 * FIXME: This needs proper synchronization with psr state for some
5011 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5012 */
439d7ac0 5013
96178eeb
VK
5014 dig_port = dp_to_dig_port(intel_dp);
5015 encoder = &dig_port->base;
723f9aab 5016 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5017
5018 if (!intel_crtc) {
5019 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5020 return;
5021 }
5022
6e3c9717 5023 config = intel_crtc->config;
439d7ac0 5024
96178eeb 5025 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5026 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5027 return;
5028 }
5029
96178eeb
VK
5030 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5031 refresh_rate)
439d7ac0
PB
5032 index = DRRS_LOW_RR;
5033
96178eeb 5034 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5035 DRM_DEBUG_KMS(
5036 "DRRS requested for previously set RR...ignoring\n");
5037 return;
5038 }
5039
5040 if (!intel_crtc->active) {
5041 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5042 return;
5043 }
5044
44395bfe 5045 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5046 switch (index) {
5047 case DRRS_HIGH_RR:
5048 intel_dp_set_m_n(intel_crtc, M1_N1);
5049 break;
5050 case DRRS_LOW_RR:
5051 intel_dp_set_m_n(intel_crtc, M2_N2);
5052 break;
5053 case DRRS_MAX_RR:
5054 default:
5055 DRM_ERROR("Unsupported refresh rate type\n");
5056 }
5057 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5058 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5059 val = I915_READ(reg);
a4c30b1d 5060
439d7ac0 5061 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5062 if (IS_VALLEYVIEW(dev))
5063 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5064 else
5065 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5066 } else {
6fa7aec1
VK
5067 if (IS_VALLEYVIEW(dev))
5068 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5069 else
5070 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5071 }
5072 I915_WRITE(reg, val);
5073 }
5074
4e9ac947
VK
5075 dev_priv->drrs.refresh_rate_type = index;
5076
5077 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5078}
5079
b33a2815
VK
5080/**
5081 * intel_edp_drrs_enable - init drrs struct if supported
5082 * @intel_dp: DP struct
5083 *
5084 * Initializes frontbuffer_bits and drrs.dp
5085 */
c395578e
VK
5086void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5087{
5088 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5089 struct drm_i915_private *dev_priv = dev->dev_private;
5090 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5091 struct drm_crtc *crtc = dig_port->base.base.crtc;
5092 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5093
5094 if (!intel_crtc->config->has_drrs) {
5095 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5096 return;
5097 }
5098
5099 mutex_lock(&dev_priv->drrs.mutex);
5100 if (WARN_ON(dev_priv->drrs.dp)) {
5101 DRM_ERROR("DRRS already enabled\n");
5102 goto unlock;
5103 }
5104
5105 dev_priv->drrs.busy_frontbuffer_bits = 0;
5106
5107 dev_priv->drrs.dp = intel_dp;
5108
5109unlock:
5110 mutex_unlock(&dev_priv->drrs.mutex);
5111}
5112
b33a2815
VK
5113/**
5114 * intel_edp_drrs_disable - Disable DRRS
5115 * @intel_dp: DP struct
5116 *
5117 */
c395578e
VK
5118void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5119{
5120 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5121 struct drm_i915_private *dev_priv = dev->dev_private;
5122 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5123 struct drm_crtc *crtc = dig_port->base.base.crtc;
5124 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5125
5126 if (!intel_crtc->config->has_drrs)
5127 return;
5128
5129 mutex_lock(&dev_priv->drrs.mutex);
5130 if (!dev_priv->drrs.dp) {
5131 mutex_unlock(&dev_priv->drrs.mutex);
5132 return;
5133 }
5134
5135 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5136 intel_dp_set_drrs_state(dev_priv->dev,
5137 intel_dp->attached_connector->panel.
5138 fixed_mode->vrefresh);
5139
5140 dev_priv->drrs.dp = NULL;
5141 mutex_unlock(&dev_priv->drrs.mutex);
5142
5143 cancel_delayed_work_sync(&dev_priv->drrs.work);
5144}
5145
4e9ac947
VK
5146static void intel_edp_drrs_downclock_work(struct work_struct *work)
5147{
5148 struct drm_i915_private *dev_priv =
5149 container_of(work, typeof(*dev_priv), drrs.work.work);
5150 struct intel_dp *intel_dp;
5151
5152 mutex_lock(&dev_priv->drrs.mutex);
5153
5154 intel_dp = dev_priv->drrs.dp;
5155
5156 if (!intel_dp)
5157 goto unlock;
5158
439d7ac0 5159 /*
4e9ac947
VK
5160 * The delayed work can race with an invalidate, hence we need to
5161 * recheck.
439d7ac0
PB
5162 */
5163
4e9ac947
VK
5164 if (dev_priv->drrs.busy_frontbuffer_bits)
5165 goto unlock;
439d7ac0 5166
4e9ac947
VK
5167 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5168 intel_dp_set_drrs_state(dev_priv->dev,
5169 intel_dp->attached_connector->panel.
5170 downclock_mode->vrefresh);
439d7ac0 5171
4e9ac947 5172unlock:
439d7ac0 5173
4e9ac947 5174 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5175}
5176
b33a2815
VK
5177/**
5178 * intel_edp_drrs_invalidate - Invalidate DRRS
5179 * @dev: DRM device
5180 * @frontbuffer_bits: frontbuffer plane tracking bits
5181 *
5182 * When there is a disturbance on screen (due to cursor movement/time
5183 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
5184 * the high RR.
5185 *
5186 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5187 */
a93fad0f
VK
5188void intel_edp_drrs_invalidate(struct drm_device *dev,
5189 unsigned frontbuffer_bits)
5190{
5191 struct drm_i915_private *dev_priv = dev->dev_private;
5192 struct drm_crtc *crtc;
5193 enum pipe pipe;
5194
5195 if (!dev_priv->drrs.dp)
5196 return;
5197
3954e733
R
5198 cancel_delayed_work_sync(&dev_priv->drrs.work);
5199
a93fad0f
VK
5200 mutex_lock(&dev_priv->drrs.mutex);
5201 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5202 pipe = to_intel_crtc(crtc)->pipe;
5203
5204 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5205 intel_dp_set_drrs_state(dev_priv->dev,
5206 dev_priv->drrs.dp->attached_connector->panel.
5207 fixed_mode->vrefresh);
5208 }
5209
5210 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5211
5212 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5213 mutex_unlock(&dev_priv->drrs.mutex);
5214}
5215
b33a2815
VK
5216/**
5217 * intel_edp_drrs_flush - Flush DRRS
5218 * @dev: DRM device
5219 * @frontbuffer_bits: frontbuffer plane tracking bits
5220 *
5221 * When there is no movement on screen, DRRS work can be scheduled.
5222 * This DRRS work is responsible for setting relevant registers after a
5223 * timeout of 1 second.
5224 *
5225 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5226 */
a93fad0f
VK
5227void intel_edp_drrs_flush(struct drm_device *dev,
5228 unsigned frontbuffer_bits)
5229{
5230 struct drm_i915_private *dev_priv = dev->dev_private;
5231 struct drm_crtc *crtc;
5232 enum pipe pipe;
5233
5234 if (!dev_priv->drrs.dp)
5235 return;
5236
3954e733
R
5237 cancel_delayed_work_sync(&dev_priv->drrs.work);
5238
a93fad0f
VK
5239 mutex_lock(&dev_priv->drrs.mutex);
5240 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5241 pipe = to_intel_crtc(crtc)->pipe;
5242 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5243
a93fad0f
VK
5244 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5245 !dev_priv->drrs.busy_frontbuffer_bits)
5246 schedule_delayed_work(&dev_priv->drrs.work,
5247 msecs_to_jiffies(1000));
5248 mutex_unlock(&dev_priv->drrs.mutex);
5249}
5250
b33a2815
VK
5251/**
5252 * DOC: Display Refresh Rate Switching (DRRS)
5253 *
5254 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5255 * which enables switching between low and high refresh rates,
5256 * dynamically, based on the usage scenario. This feature is applicable
5257 * for internal panels.
5258 *
5259 * Indication that the panel supports DRRS is given by the panel EDID, which
5260 * would list multiple refresh rates for one resolution.
5261 *
5262 * DRRS is of 2 types - static and seamless.
5263 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5264 * (may appear as a blink on screen) and is used in dock-undock scenario.
5265 * Seamless DRRS involves changing RR without any visual effect to the user
5266 * and can be used during normal system usage. This is done by programming
5267 * certain registers.
5268 *
5269 * Support for static/seamless DRRS may be indicated in the VBT based on
5270 * inputs from the panel spec.
5271 *
5272 * DRRS saves power by switching to low RR based on usage scenarios.
5273 *
5274 * eDP DRRS:-
5275 * The implementation is based on frontbuffer tracking implementation.
5276 * When there is a disturbance on the screen triggered by user activity or a
5277 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5278 * When there is no movement on screen, after a timeout of 1 second, a switch
5279 * to low RR is made.
5280 * For integration with frontbuffer tracking code,
5281 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5282 *
5283 * DRRS can be further extended to support other internal panels and also
5284 * the scenario of video playback wherein RR is set based on the rate
5285 * requested by userspace.
5286 */
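/*
 * A minimal usage sketch (illustrative only, not part of the driver): a
 * frontbuffer tracking caller is expected to bracket screen updates with
 * the two hooks above, passing the affected plane bits:
 *
 *	// screen content is about to change: force the high refresh rate
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *
 *	// ... rendering / flip completes ...
 *
 *	// no further activity on these planes: the delayed work may
 *	// downclock after the 1 second timeout
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */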
5287
5288/**
5289 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5290 * @intel_connector: eDP connector
5291 * @fixed_mode: preferred mode of panel
5292 *
5293 * This function is called only once at driver load to initialize basic
5294 * DRRS state.
5295 *
5296 * Returns:
5297 * Downclock mode if panel supports it, else return NULL.
5298 * DRRS support is determined by the presence of downclock mode (apart
5299 * from VBT setting).
5300 */
4f9db5b5 5301static struct drm_display_mode *
96178eeb
VK
5302intel_dp_drrs_init(struct intel_connector *intel_connector,
5303 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5304{
5305 struct drm_connector *connector = &intel_connector->base;
96178eeb 5306 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5307 struct drm_i915_private *dev_priv = dev->dev_private;
5308 struct drm_display_mode *downclock_mode = NULL;
5309
5310 if (INTEL_INFO(dev)->gen <= 6) {
5311 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5312 return NULL;
5313 }
5314
5315 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5316 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5317 return NULL;
5318 }
5319
5320 downclock_mode = intel_find_panel_downclock
5321 (dev, fixed_mode, connector);
5322
5323 if (!downclock_mode) {
a1d26342 5324 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5325 return NULL;
5326 }
5327
4e9ac947
VK
5328 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5329
96178eeb 5330 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5331
96178eeb 5332 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5333
96178eeb 5334 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5335 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5336 return downclock_mode;
5337}
5338
ed92f0b2 5339static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5340 struct intel_connector *intel_connector)
ed92f0b2
PZ
5341{
5342 struct drm_connector *connector = &intel_connector->base;
5343 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5344 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5345 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5346 struct drm_i915_private *dev_priv = dev->dev_private;
5347 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5348 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5349 bool has_dpcd;
5350 struct drm_display_mode *scan;
5351 struct edid *edid;
6517d273 5352 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5353
5354 if (!is_edp(intel_dp))
5355 return true;
5356
49e6bc51
VS
5357 pps_lock(intel_dp);
5358 intel_edp_panel_vdd_sanitize(intel_dp);
5359 pps_unlock(intel_dp);
63635217 5360
ed92f0b2 5361 /* Cache DPCD and EDID for edp. */
ed92f0b2 5362 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5363
5364 if (has_dpcd) {
5365 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5366 dev_priv->no_aux_handshake =
5367 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5368 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5369 } else {
5370 /* if this fails, presume the device is a ghost */
5371 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5372 return false;
5373 }
5374
5375 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5376 pps_lock(intel_dp);
36b5f425 5377 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5378 pps_unlock(intel_dp);
ed92f0b2 5379
060c8778 5380 mutex_lock(&dev->mode_config.mutex);
0b99836f 5381 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5382 if (edid) {
5383 if (drm_add_edid_modes(connector, edid)) {
5384 drm_mode_connector_update_edid_property(connector,
5385 edid);
5386 drm_edid_to_eld(connector, edid);
5387 } else {
5388 kfree(edid);
5389 edid = ERR_PTR(-EINVAL);
5390 }
5391 } else {
5392 edid = ERR_PTR(-ENOENT);
5393 }
5394 intel_connector->edid = edid;
5395
5396 /* prefer fixed mode from EDID if available */
5397 list_for_each_entry(scan, &connector->probed_modes, head) {
5398 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5399 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5400 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5401 intel_connector, fixed_mode);
ed92f0b2
PZ
5402 break;
5403 }
5404 }
5405
5406 /* fallback to VBT if available for eDP */
5407 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5408 fixed_mode = drm_mode_duplicate(dev,
5409 dev_priv->vbt.lfp_lvds_vbt_mode);
5410 if (fixed_mode)
5411 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5412 }
060c8778 5413 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5414
01527b31
CT
5415 if (IS_VALLEYVIEW(dev)) {
5416 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5417 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5418
5419 /*
5420 * Figure out the current pipe for the initial backlight setup.
5421 * If the current pipe isn't valid, try the PPS pipe, and if that
5422 * fails just assume pipe A.
5423 */
5424 if (IS_CHERRYVIEW(dev))
5425 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5426 else
5427 pipe = PORT_TO_PIPE(intel_dp->DP);
5428
5429 if (pipe != PIPE_A && pipe != PIPE_B)
5430 pipe = intel_dp->pps_pipe;
5431
5432 if (pipe != PIPE_A && pipe != PIPE_B)
5433 pipe = PIPE_A;
5434
5435 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5436 pipe_name(pipe));
01527b31
CT
5437 }
5438
4f9db5b5 5439 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5440 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5441 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5442
5443 return true;
5444}
5445
16c25533 5446bool
f0fec3f2
PZ
5447intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5448 struct intel_connector *intel_connector)
a4fc5ed6 5449{
f0fec3f2
PZ
5450 struct drm_connector *connector = &intel_connector->base;
5451 struct intel_dp *intel_dp = &intel_dig_port->dp;
5452 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5453 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5454 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5455 enum port port = intel_dig_port->port;
0b99836f 5456 int type;
a4fc5ed6 5457
a4a5d2f8
VS
5458 intel_dp->pps_pipe = INVALID_PIPE;
5459
ec5b01dd 5460 /* intel_dp vfuncs */
b6b5e383
DL
5461 if (INTEL_INFO(dev)->gen >= 9)
5462 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5463 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5464 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5465 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5466 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5467 else if (HAS_PCH_SPLIT(dev))
5468 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5469 else
5470 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5471
b9ca5fad
DL
5472 if (INTEL_INFO(dev)->gen >= 9)
5473 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5474 else
5475 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5476
0767935e
DV
5477 /* Preserve the current hw state. */
5478 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5479 intel_dp->attached_connector = intel_connector;
3d3dc149 5480
3b32a35b 5481 if (intel_dp_is_edp(dev, port))
b329530c 5482 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5483 else
5484 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5485
f7d24902
ID
5486 /*
5487 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5488 * for DP the encoder type can be set by the caller to
5489 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5490 */
5491 if (type == DRM_MODE_CONNECTOR_eDP)
5492 intel_encoder->type = INTEL_OUTPUT_EDP;
5493
c17ed5b5
VS
5494 /* eDP only on port B and/or C on vlv/chv */
5495 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5496 port != PORT_B && port != PORT_C))
5497 return false;
5498
e7281eab
ID
5499 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5500 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5501 port_name(port));
5502
b329530c 5503 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5504 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5505
a4fc5ed6
KP
5506 connector->interlace_allowed = true;
5507 connector->doublescan_allowed = 0;
5508
f0fec3f2 5509 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5510 edp_panel_vdd_work);
a4fc5ed6 5511
df0e9248 5512 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5513 drm_connector_register(connector);
a4fc5ed6 5514
affa9354 5515 if (HAS_DDI(dev))
bcbc889b
PZ
5516 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5517 else
5518 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5519 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5520
0b99836f 5521 /* Set up the hotplug pin. */
ab9d7c30
PZ
5522 switch (port) {
5523 case PORT_A:
1d843f9d 5524 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5525 break;
5526 case PORT_B:
1d843f9d 5527 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5528 break;
5529 case PORT_C:
1d843f9d 5530 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5531 break;
5532 case PORT_D:
1d843f9d 5533 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5534 break;
5535 default:
ad1c0b19 5536 BUG();
5eb08b69
ZW
5537 }
5538
dada1a9f 5539 if (is_edp(intel_dp)) {
773538e8 5540 pps_lock(intel_dp);
1e74a324
VS
5541 intel_dp_init_panel_power_timestamps(intel_dp);
5542 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5543 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5544 else
36b5f425 5545 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5546 pps_unlock(intel_dp);
dada1a9f 5547 }
0095e6dc 5548
9d1a1031 5549 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5550
0e32b39c 5551 /* init MST on ports that can support it */
c86ea3d0 5552 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5553 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5554 intel_dp_mst_encoder_init(intel_dig_port,
5555 intel_connector->base.base.id);
0e32b39c
DA
5556 }
5557 }
5558
36b5f425 5559 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5560 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5561 if (is_edp(intel_dp)) {
5562 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5563 /*
5564 * vdd might still be enabled due to the delayed vdd off.
5565 * Make sure vdd is actually turned off here.
5566 */
773538e8 5567 pps_lock(intel_dp);
4be73780 5568 edp_panel_vdd_off_sync(intel_dp);
773538e8 5569 pps_unlock(intel_dp);
15b1d171 5570 }
34ea3d38 5571 drm_connector_unregister(connector);
b2f246a8 5572 drm_connector_cleanup(connector);
16c25533 5573 return false;
b2f246a8 5574 }
32f9d658 5575
f684960e
CW
5576 intel_dp_add_properties(intel_dp, connector);
5577
a4fc5ed6
KP
5578 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5579 * 0xd. Failure to do so will result in spurious interrupts being
5580 * generated on the port when a cable is not attached.
5581 */
5582 if (IS_G4X(dev) && !IS_GM45(dev)) {
5583 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5584 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5585 }
16c25533 5586
aa7471d2
JN
5587 i915_debugfs_connector_add(connector);
5588
16c25533 5589 return true;
a4fc5ed6 5590}
f0fec3f2
PZ
5591
5592void
5593intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5594{
13cf5504 5595 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5596 struct intel_digital_port *intel_dig_port;
5597 struct intel_encoder *intel_encoder;
5598 struct drm_encoder *encoder;
5599 struct intel_connector *intel_connector;
5600
b14c5679 5601 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5602 if (!intel_dig_port)
5603 return;
5604
9bdbd0b9 5605 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5606 if (!intel_connector) {
5607 kfree(intel_dig_port);
5608 return;
5609 }
5610
5611 intel_encoder = &intel_dig_port->base;
5612 encoder = &intel_encoder->base;
5613
5614 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5615 DRM_MODE_ENCODER_TMDS);
5616
5bfe2ac0 5617 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5618 intel_encoder->disable = intel_disable_dp;
00c09d70 5619 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5620 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5621 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5622 if (IS_CHERRYVIEW(dev)) {
9197c88b 5623 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5624 intel_encoder->pre_enable = chv_pre_enable_dp;
5625 intel_encoder->enable = vlv_enable_dp;
580d3811 5626 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5627 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5628 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5629 intel_encoder->pre_enable = vlv_pre_enable_dp;
5630 intel_encoder->enable = vlv_enable_dp;
49277c31 5631 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5632 } else {
ecff4f3b
JN
5633 intel_encoder->pre_enable = g4x_pre_enable_dp;
5634 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5635 if (INTEL_INFO(dev)->gen >= 5)
5636 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5637 }
f0fec3f2 5638
174edf1f 5639 intel_dig_port->port = port;
f0fec3f2
PZ
5640 intel_dig_port->dp.output_reg = output_reg;
5641
00c09d70 5642 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5643 if (IS_CHERRYVIEW(dev)) {
5644 if (port == PORT_D)
5645 intel_encoder->crtc_mask = 1 << 2;
5646 else
5647 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5648 } else {
5649 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5650 }
bc079e8b 5651 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5652 intel_encoder->hot_plug = intel_dp_hot_plug;
5653
13cf5504
DA
5654 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5655 dev_priv->hpd_irq_port[port] = intel_dig_port;
5656
15b1d171
PZ
5657 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5658 drm_encoder_cleanup(encoder);
5659 kfree(intel_dig_port);
b2f246a8 5660 kfree(intel_connector);
15b1d171 5661 }
f0fec3f2 5662}
0e32b39c
DA
5663
5664void intel_dp_mst_suspend(struct drm_device *dev)
5665{
5666 struct drm_i915_private *dev_priv = dev->dev_private;
5667 int i;
5668
5669 /* disable MST */
5670 for (i = 0; i < I915_MAX_PORTS; i++) {
5671 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5672 if (!intel_dig_port)
5673 continue;
5674
5675 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5676 if (!intel_dig_port->dp.can_mst)
5677 continue;
5678 if (intel_dig_port->dp.is_mst)
5679 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5680 }
5681 }
5682}
5683
5684void intel_dp_mst_resume(struct drm_device *dev)
5685{
5686 struct drm_i915_private *dev_priv = dev->dev_private;
5687 int i;
5688
5689 for (i = 0; i < I915_MAX_PORTS; i++) {
5690 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5691 if (!intel_dig_port)
5692 continue;
5693 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5694 int ret;
5695
5696 if (!intel_dig_port->dp.can_mst)
5697 continue;
5698
5699 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5700 if (ret != 0) {
5701 intel_dp_check_mst_status(&intel_dig_port->dp);
5702 }
5703 }
5704 }
5705}