drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
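	/*
	 * Worked example for the first entry: 0x819999a >> 22 = 32 (m2_int)
	 * and 0x819999a & 0x3fffff = 1677722 (m2_fraction), i.e. m2 ≈ 32.4,
	 * matching the annotations below.
	 */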
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
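/* e.g. lane_count == 2 yields 0xc: lanes 2 and 3 of the 4-lane maximum unused */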

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
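/*
 * Completing that example: intel_dp_link_required(119000, 18) = 214200,
 * which fits within the 216000 returned by intel_dp_max_data_rate(270000, 1),
 * so 1680x1050R at 18bpp just squeezes onto a single 2.7GHz lane.
 */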

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
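/*
 * The two helpers above convert between AUX message bytes and the 32-bit
 * AUX_CH data registers, which hold up to four payload bytes each, MSB
 * first: e.g. packing the two bytes 0x0a, 0x0b yields 0x0a0b0000.
 */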

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shutdown panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

739static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
740 bool has_aux_irq,
741 int send_bytes,
742 uint32_t aux_clock_divider)
743{
744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745 struct drm_device *dev = intel_dig_port->base.base.dev;
746 uint32_t precharge, timeout;
747
748 if (IS_GEN6(dev))
749 precharge = 3;
750 else
751 precharge = 5;
752
753 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
754 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755 else
756 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
757
758 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 759 DP_AUX_CH_CTL_DONE |
5ed12a19 760 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 761 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 762 timeout |
788d4433 763 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
764 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 766 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
767}
768
b9ca5fad
DL
769static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
770 bool has_aux_irq,
771 int send_bytes,
772 uint32_t unused)
773{
774 return DP_AUX_CH_CTL_SEND_BUSY |
775 DP_AUX_CH_CTL_DONE |
776 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
777 DP_AUX_CH_CTL_TIME_OUT_ERROR |
778 DP_AUX_CH_CTL_TIME_OUT_1600us |
779 DP_AUX_CH_CTL_RECEIVE_ERROR |
780 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
781 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
782}
783
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
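/*
 * The four header bytes assembled into txbuf[] below are: request[3:0] plus
 * address bits 19:16, address bits 15:8, address bits 7:0, and the payload
 * size minus one.
 */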
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
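	/*
	 * The DPLL_CTRL1 link rate fields encode half of port_clock
	 * (e.g. a 540000 kHz link ends up as DPLL_CTRL1_LINK_RATE_2700),
	 * hence the division by two in the switch below.
	 */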
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;

	/* WaDisableHBR2:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

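/*
 * Note: intersect_rates() below walks both lists with two indices, which
 * assumes the source and sink rate lists are sorted in ascending order
 * (the source tables in this file are).
 */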
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

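/*
 * rate_to_index(0, rates) counts the valid (non-zero) entries of a
 * zero-initialized rate array, so rates[rate_to_index(0, rates) - 1] in
 * intel_dp_max_link_rate() below is the highest common rate.
 */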
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from VBT only for panels that don't have bpp in EDID */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

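	/*
	 * Search order: bpp descends from the starting value while link clock
	 * and lane count ascend, so the first fit keeps the highest bpp at
	 * the cheapest link configuration that has enough bandwidth.
	 */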
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		dpa_ctl |= DP_PLL_FREQ_162MHZ;
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}

ffd6749d
PZ
1667#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1668#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1669
1a5ef5b7
PZ
1670#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1671#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1672
ffd6749d
PZ
1673#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1674#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
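/*
 * The IDLE_*_MASK/VALUE pairs above are compared against the panel power
 * status register by wait_panel_status() below; that is how
 * wait_panel_on(), wait_panel_off() and wait_panel_power_cycle() detect
 * the corresponding power sequencer states.
 */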
99ea7127 1675
4be73780 1676static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1677 u32 mask,
1678 u32 value)
bd943159 1679{
30add22d 1680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1681 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1682 u32 pp_stat_reg, pp_ctrl_reg;
1683
e39b999a
VS
1684 lockdep_assert_held(&dev_priv->pps_mutex);
1685
bf13e81b
JN
1686 pp_stat_reg = _pp_stat_reg(intel_dp);
1687 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1688
99ea7127 1689 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1690 mask, value,
1691 I915_READ(pp_stat_reg),
1692 I915_READ(pp_ctrl_reg));
32ce697c 1693
453c5420 1694 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1695 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
32ce697c 1698 }
54c136d4
CW
1699
1700 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1701}
32ce697c 1702
4be73780 1703static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1704{
1705 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1706 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1707}
1708
4be73780 1709static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1710{
1711 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1712 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1713}
1714
4be73780 1715static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1716{
1717 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1718
 1719 /* When we disable the VDD override bit last, we have to do the manual
1720 * wait. */
1721 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1722 intel_dp->panel_power_cycle_delay);
1723
4be73780 1724 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1725}
1726
4be73780 1727static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1728{
1729 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1730 intel_dp->backlight_on_delay);
1731}
1732
4be73780 1733static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1734{
1735 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1736 intel_dp->backlight_off_delay);
1737}
99ea7127 1738
832dd3c1
KP
1739/* Read the current pp_control value, unlocking the register if it
1740 * is locked
1741 */
1742
453c5420 1743static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1744{
453c5420
JB
1745 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1746 struct drm_i915_private *dev_priv = dev->dev_private;
1747 u32 control;
832dd3c1 1748
e39b999a
VS
1749 lockdep_assert_held(&dev_priv->pps_mutex);
1750
bf13e81b 1751 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1752 if (!IS_BROXTON(dev)) {
1753 control &= ~PANEL_UNLOCK_MASK;
1754 control |= PANEL_UNLOCK_REGS;
1755 }
832dd3c1 1756 return control;
bd943159
KP
1757}
1758
951468f3
VS
1759/*
1760 * Must be paired with edp_panel_vdd_off().
1761 * Must hold pps_mutex around the whole on/off sequence.
1762 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1763 */
1e0560e0 1764static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1765{
30add22d 1766 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1767 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1768 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1769 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1770 enum intel_display_power_domain power_domain;
5d613501 1771 u32 pp;
453c5420 1772 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1773 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1774
e39b999a
VS
1775 lockdep_assert_held(&dev_priv->pps_mutex);
1776
97af61f5 1777 if (!is_edp(intel_dp))
adddaaf4 1778 return false;
bd943159 1779
2c623c11 1780 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1781 intel_dp->want_panel_vdd = true;
99ea7127 1782
4be73780 1783 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1784 return need_to_disable;
b0665d57 1785
4e6e1a54
ID
1786 power_domain = intel_display_port_power_domain(intel_encoder);
1787 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1788
3936fcf4
VS
1789 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1790 port_name(intel_dig_port->port));
bd943159 1791
4be73780
DV
1792 if (!edp_have_panel_power(intel_dp))
1793 wait_panel_power_cycle(intel_dp);
99ea7127 1794
453c5420 1795 pp = ironlake_get_pp_control(intel_dp);
5d613501 1796 pp |= EDP_FORCE_VDD;
ebf33b18 1797
bf13e81b
JN
1798 pp_stat_reg = _pp_stat_reg(intel_dp);
1799 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1800
1801 I915_WRITE(pp_ctrl_reg, pp);
1802 POSTING_READ(pp_ctrl_reg);
1803 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1804 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1805 /*
1806 * If the panel wasn't on, delay before accessing aux channel
1807 */
4be73780 1808 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1809 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1810 port_name(intel_dig_port->port));
f01eca2e 1811 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1812 }
adddaaf4
JN
1813
1814 return need_to_disable;
1815}
1816
951468f3
VS
1817/*
1818 * Must be paired with intel_edp_panel_vdd_off() or
1819 * intel_edp_panel_off().
1820 * Nested calls to these functions are not allowed since
1821 * we drop the lock. Caller must use some higher level
1822 * locking to prevent nested calls from other threads.
1823 */
b80d6c78 1824void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1825{
c695b6b6 1826 bool vdd;
adddaaf4 1827
c695b6b6
VS
1828 if (!is_edp(intel_dp))
1829 return;
1830
773538e8 1831 pps_lock(intel_dp);
c695b6b6 1832 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1833 pps_unlock(intel_dp);
c695b6b6 1834
e2c719b7 1835 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1836 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1837}
1838
4be73780 1839static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1840{
30add22d 1841 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1842 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1843 struct intel_digital_port *intel_dig_port =
1844 dp_to_dig_port(intel_dp);
1845 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1846 enum intel_display_power_domain power_domain;
5d613501 1847 u32 pp;
453c5420 1848 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1849
e39b999a 1850 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1851
15e899a0 1852 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1853
15e899a0 1854 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1855 return;
b0665d57 1856
3936fcf4
VS
1857 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1858 port_name(intel_dig_port->port));
bd943159 1859
be2c9196
VS
1860 pp = ironlake_get_pp_control(intel_dp);
1861 pp &= ~EDP_FORCE_VDD;
453c5420 1862
be2c9196
VS
1863 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1864 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1865
be2c9196
VS
1866 I915_WRITE(pp_ctrl_reg, pp);
1867 POSTING_READ(pp_ctrl_reg);
90791a5c 1868
be2c9196
VS
1869 /* Make sure sequencer is idle before allowing subsequent activity */
1870 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1871 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1872
be2c9196
VS
1873 if ((pp & POWER_TARGET_ON) == 0)
1874 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1875
be2c9196
VS
1876 power_domain = intel_display_port_power_domain(intel_encoder);
1877 intel_display_power_put(dev_priv, power_domain);
bd943159 1878}
5d613501 1879
4be73780 1880static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1881{
1882 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1883 struct intel_dp, panel_vdd_work);
bd943159 1884
773538e8 1885 pps_lock(intel_dp);
15e899a0
VS
1886 if (!intel_dp->want_panel_vdd)
1887 edp_panel_vdd_off_sync(intel_dp);
773538e8 1888 pps_unlock(intel_dp);
bd943159
KP
1889}
1890
aba86890
ID
1891static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1892{
1893 unsigned long delay;
1894
1895 /*
1896 * Queue the timer to fire a long time from now (relative to the power
1897 * down delay) to keep the panel power up across a sequence of
1898 * operations.
1899 */
1900 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1901 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1902}
1903
951468f3
VS
1904/*
1905 * Must be paired with edp_panel_vdd_on().
1906 * Must hold pps_mutex around the whole on/off sequence.
1907 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1908 */
4be73780 1909static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1910{
e39b999a
VS
1911 struct drm_i915_private *dev_priv =
1912 intel_dp_to_dev(intel_dp)->dev_private;
1913
1914 lockdep_assert_held(&dev_priv->pps_mutex);
1915
97af61f5
KP
1916 if (!is_edp(intel_dp))
1917 return;
5d613501 1918
e2c719b7 1919 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1920 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1921
bd943159
KP
1922 intel_dp->want_panel_vdd = false;
1923
aba86890 1924 if (sync)
4be73780 1925 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1926 else
1927 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1928}
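/*
 * Illustrative call pattern (cf. intel_enable_dp() further below): the
 * low-level VDD/panel helpers are used in matched pairs with pps_mutex
 * held:
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */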
1929
9f0fb5be 1930static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1931{
30add22d 1932 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1933 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1934 u32 pp;
453c5420 1935 u32 pp_ctrl_reg;
9934c132 1936
9f0fb5be
VS
1937 lockdep_assert_held(&dev_priv->pps_mutex);
1938
97af61f5 1939 if (!is_edp(intel_dp))
bd943159 1940 return;
99ea7127 1941
3936fcf4
VS
1942 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1943 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1944
e7a89ace
VS
1945 if (WARN(edp_have_panel_power(intel_dp),
1946 "eDP port %c panel power already on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1948 return;
9934c132 1949
4be73780 1950 wait_panel_power_cycle(intel_dp);
37c6c9b0 1951
bf13e81b 1952 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1953 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1954 if (IS_GEN5(dev)) {
1955 /* ILK workaround: disable reset around power sequence */
1956 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
05ce1a49 1959 }
37c6c9b0 1960
1c0ae80a 1961 pp |= POWER_TARGET_ON;
99ea7127
KP
1962 if (!IS_GEN5(dev))
1963 pp |= PANEL_POWER_RESET;
1964
453c5420
JB
1965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
9934c132 1967
4be73780 1968 wait_panel_on(intel_dp);
dce56b3c 1969 intel_dp->last_power_on = jiffies;
9934c132 1970
05ce1a49
KP
1971 if (IS_GEN5(dev)) {
1972 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1973 I915_WRITE(pp_ctrl_reg, pp);
1974 POSTING_READ(pp_ctrl_reg);
05ce1a49 1975 }
9f0fb5be 1976}
e39b999a 1977
9f0fb5be
VS
1978void intel_edp_panel_on(struct intel_dp *intel_dp)
1979{
1980 if (!is_edp(intel_dp))
1981 return;
1982
1983 pps_lock(intel_dp);
1984 edp_panel_on(intel_dp);
773538e8 1985 pps_unlock(intel_dp);
9934c132
JB
1986}
1987
9f0fb5be
VS
1988
1989static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1990{
4e6e1a54
ID
1991 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1992 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1993 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1994 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1995 enum intel_display_power_domain power_domain;
99ea7127 1996 u32 pp;
453c5420 1997 u32 pp_ctrl_reg;
9934c132 1998
9f0fb5be
VS
1999 lockdep_assert_held(&dev_priv->pps_mutex);
2000
97af61f5
KP
2001 if (!is_edp(intel_dp))
2002 return;
37c6c9b0 2003
3936fcf4
VS
2004 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2005 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2006
3936fcf4
VS
2007 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2008 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2009
453c5420 2010 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
 2011 /* We need to switch off panel power _and_ force vdd; otherwise some
2012 * panels get very unhappy and cease to work. */
b3064154
PJ
2013 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2014 EDP_BLC_ENABLE);
453c5420 2015
bf13e81b 2016 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2017
849e39f5
PZ
2018 intel_dp->want_panel_vdd = false;
2019
453c5420
JB
2020 I915_WRITE(pp_ctrl_reg, pp);
2021 POSTING_READ(pp_ctrl_reg);
9934c132 2022
dce56b3c 2023 intel_dp->last_power_cycle = jiffies;
4be73780 2024 wait_panel_off(intel_dp);
849e39f5
PZ
2025
2026 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2027 power_domain = intel_display_port_power_domain(intel_encoder);
2028 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2029}
e39b999a 2030
9f0fb5be
VS
2031void intel_edp_panel_off(struct intel_dp *intel_dp)
2032{
2033 if (!is_edp(intel_dp))
2034 return;
e39b999a 2035
9f0fb5be
VS
2036 pps_lock(intel_dp);
2037 edp_panel_off(intel_dp);
773538e8 2038 pps_unlock(intel_dp);
9934c132
JB
2039}
2040
1250d107
JN
2041/* Enable backlight in the panel power control. */
2042static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2043{
da63a9f2
PZ
2044 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2045 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2046 struct drm_i915_private *dev_priv = dev->dev_private;
2047 u32 pp;
453c5420 2048 u32 pp_ctrl_reg;
32f9d658 2049
01cb9ea6
JB
2050 /*
2051 * If we enable the backlight right away following a panel power
2052 * on, we may see slight flicker as the panel syncs with the eDP
2053 * link. So delay a bit to make sure the image is solid before
2054 * allowing it to appear.
2055 */
4be73780 2056 wait_backlight_on(intel_dp);
e39b999a 2057
773538e8 2058 pps_lock(intel_dp);
e39b999a 2059
453c5420 2060 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2061 pp |= EDP_BLC_ENABLE;
453c5420 2062
bf13e81b 2063 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2064
2065 I915_WRITE(pp_ctrl_reg, pp);
2066 POSTING_READ(pp_ctrl_reg);
e39b999a 2067
773538e8 2068 pps_unlock(intel_dp);
32f9d658
ZW
2069}
2070
1250d107
JN
2071/* Enable backlight PWM and backlight PP control. */
2072void intel_edp_backlight_on(struct intel_dp *intel_dp)
2073{
2074 if (!is_edp(intel_dp))
2075 return;
2076
2077 DRM_DEBUG_KMS("\n");
2078
2079 intel_panel_enable_backlight(intel_dp->attached_connector);
2080 _intel_edp_backlight_on(intel_dp);
2081}
2082
2083/* Disable backlight in the panel power control. */
2084static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2085{
30add22d 2086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2087 struct drm_i915_private *dev_priv = dev->dev_private;
2088 u32 pp;
453c5420 2089 u32 pp_ctrl_reg;
32f9d658 2090
f01eca2e
KP
2091 if (!is_edp(intel_dp))
2092 return;
2093
773538e8 2094 pps_lock(intel_dp);
e39b999a 2095
453c5420 2096 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2097 pp &= ~EDP_BLC_ENABLE;
453c5420 2098
bf13e81b 2099 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2100
2101 I915_WRITE(pp_ctrl_reg, pp);
2102 POSTING_READ(pp_ctrl_reg);
f7d2323c 2103
773538e8 2104 pps_unlock(intel_dp);
e39b999a
VS
2105
2106 intel_dp->last_backlight_off = jiffies;
f7d2323c 2107 edp_wait_backlight_off(intel_dp);
1250d107 2108}
f7d2323c 2109
1250d107
JN
2110/* Disable backlight PP control and backlight PWM. */
2111void intel_edp_backlight_off(struct intel_dp *intel_dp)
2112{
2113 if (!is_edp(intel_dp))
2114 return;
2115
2116 DRM_DEBUG_KMS("\n");
f7d2323c 2117
1250d107 2118 _intel_edp_backlight_off(intel_dp);
f7d2323c 2119 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2120}
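/*
 * Note the ordering in the two entry points above: on enable the backlight
 * PWM is brought up before EDP_BLC_ENABLE is set, while on disable
 * EDP_BLC_ENABLE is cleared first and the PWM is shut down last.
 */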
a4fc5ed6 2121
73580fb7
JN
2122/*
2123 * Hook for controlling the panel power control backlight through the bl_power
2124 * sysfs attribute. Take care to handle multiple calls.
2125 */
2126static void intel_edp_backlight_power(struct intel_connector *connector,
2127 bool enable)
2128{
2129 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2130 bool is_enabled;
2131
773538e8 2132 pps_lock(intel_dp);
e39b999a 2133 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2134 pps_unlock(intel_dp);
73580fb7
JN
2135
2136 if (is_enabled == enable)
2137 return;
2138
23ba9373
JN
2139 DRM_DEBUG_KMS("panel power control backlight %s\n",
2140 enable ? "enable" : "disable");
73580fb7
JN
2141
2142 if (enable)
2143 _intel_edp_backlight_on(intel_dp);
2144 else
2145 _intel_edp_backlight_off(intel_dp);
2146}
2147
64e1077a
VS
2148static const char *state_string(bool enabled)
2149{
2150 return enabled ? "on" : "off";
2151}
2152
2153static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2154{
2155 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2156 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2157 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2158
2159 I915_STATE_WARN(cur_state != state,
2160 "DP port %c state assertion failure (expected %s, current %s)\n",
2161 port_name(dig_port->port),
2162 state_string(state), state_string(cur_state));
2163}
2164#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2165
2166static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2167{
2168 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2169
2170 I915_STATE_WARN(cur_state != state,
2171 "eDP PLL state assertion failure (expected %s, current %s)\n",
2172 state_string(state), state_string(cur_state));
2173}
2174#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2175#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2176
2bd2ad64 2177static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2178{
da63a9f2 2179 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2180 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2181 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f
JB
2182 u32 dpa_ctl;
2183
64e1077a
VS
2184 assert_pipe_disabled(dev_priv, crtc->pipe);
2185 assert_dp_port_disabled(intel_dp);
2186 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2187
d240f20f
JB
2188 DRM_DEBUG_KMS("\n");
2189 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2190
2191 /* We don't adjust intel_dp->DP while tearing down the link, to
2192 * facilitate link retraining (e.g. after hotplug). Hence clear all
2193 * enable bits here to ensure that we don't enable too much. */
2194 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2195 intel_dp->DP |= DP_PLL_ENABLE;
2196 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2197 POSTING_READ(DP_A);
2198 udelay(200);
d240f20f
JB
2199}
2200
2bd2ad64 2201static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2202{
da63a9f2 2203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2204 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2205 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f
JB
2206 u32 dpa_ctl;
2207
64e1077a
VS
2208 assert_pipe_disabled(dev_priv, crtc->pipe);
2209 assert_dp_port_disabled(intel_dp);
2210 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2211
d240f20f 2212 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2213
2214 /* We can't rely on the value tracked for the DP register in
2215 * intel_dp->DP because link_down must not change that (otherwise link
 2216 * re-training will fail). */
298b0b39 2217 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2218 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2219 POSTING_READ(DP_A);
d240f20f
JB
2220 udelay(200);
2221}
2222
c7ad3810 2223/* If the sink supports it, try to set the power state appropriately */
c19b0669 2224void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2225{
2226 int ret, i;
2227
2228 /* Should have a valid DPCD by this point */
2229 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2230 return;
2231
2232 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2233 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2234 DP_SET_POWER_D3);
c7ad3810
JB
2235 } else {
2236 /*
2237 * When turning on, we need to retry for 1ms to give the sink
2238 * time to wake up.
2239 */
2240 for (i = 0; i < 3; i++) {
9d1a1031
JN
2241 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2242 DP_SET_POWER_D0);
c7ad3810
JB
2243 if (ret == 1)
2244 break;
2245 msleep(1);
2246 }
2247 }
f9cac721
JN
2248
2249 if (ret != 1)
2250 DRM_DEBUG_KMS("failed to %s sink power state\n",
2251 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2252}
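/*
 * In this file the sink power state is toggled around the panel/port
 * sequence: intel_disable_dp() writes DRM_MODE_DPMS_OFF before turning
 * the panel off, while intel_enable_dp() writes DRM_MODE_DPMS_ON before
 * starting link training.
 */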
2253
19d8fe15
DV
2254static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2255 enum pipe *pipe)
d240f20f 2256{
19d8fe15 2257 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2258 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2259 struct drm_device *dev = encoder->base.dev;
2260 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2261 enum intel_display_power_domain power_domain;
2262 u32 tmp;
2263
2264 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2265 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2266 return false;
2267
2268 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2269
2270 if (!(tmp & DP_PORT_EN))
2271 return false;
2272
39e5fa88 2273 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2274 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2275 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2276 enum pipe p;
19d8fe15 2277
adc289d7
VS
2278 for_each_pipe(dev_priv, p) {
2279 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2280 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2281 *pipe = p;
19d8fe15
DV
2282 return true;
2283 }
2284 }
19d8fe15 2285
4a0833ec
DV
2286 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2287 intel_dp->output_reg);
39e5fa88
VS
2288 } else if (IS_CHERRYVIEW(dev)) {
2289 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2290 } else {
2291 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2292 }
d240f20f 2293
19d8fe15
DV
2294 return true;
2295}
d240f20f 2296
045ac3b5 2297static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2298 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2299{
2300 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2301 u32 tmp, flags = 0;
63000ef6
XZ
2302 struct drm_device *dev = encoder->base.dev;
2303 struct drm_i915_private *dev_priv = dev->dev_private;
2304 enum port port = dp_to_dig_port(intel_dp)->port;
2305 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2306 int dotclock;
045ac3b5 2307
9ed109a7 2308 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2309
2310 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2311
39e5fa88 2312 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2313 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2314
2315 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2316 flags |= DRM_MODE_FLAG_PHSYNC;
2317 else
2318 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2319
b81e34c2 2320 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2321 flags |= DRM_MODE_FLAG_PVSYNC;
2322 else
2323 flags |= DRM_MODE_FLAG_NVSYNC;
2324 } else {
39e5fa88 2325 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2326 flags |= DRM_MODE_FLAG_PHSYNC;
2327 else
2328 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2329
39e5fa88 2330 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2331 flags |= DRM_MODE_FLAG_PVSYNC;
2332 else
2333 flags |= DRM_MODE_FLAG_NVSYNC;
2334 }
045ac3b5 2335
2d112de7 2336 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2337
8c875fca
VS
2338 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2339 tmp & DP_COLOR_RANGE_16_235)
2340 pipe_config->limited_color_range = true;
2341
eb14cb74
VS
2342 pipe_config->has_dp_encoder = true;
2343
90a6b7b0
VS
2344 pipe_config->lane_count =
2345 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2346
eb14cb74
VS
2347 intel_dp_get_m_n(crtc, pipe_config);
2348
18442d08 2349 if (port == PORT_A) {
b377e0df 2350 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2351 pipe_config->port_clock = 162000;
2352 else
2353 pipe_config->port_clock = 270000;
2354 }
18442d08
VS
2355
2356 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2357 &pipe_config->dp_m_n);
2358
2359 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2360 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2361
2d112de7 2362 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2363
c6cd2ee2
JN
2364 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2365 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2366 /*
2367 * This is a big fat ugly hack.
2368 *
 2369 * Some machines in UEFI boot mode provide us with a VBT that has 18
2370 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2371 * unknown we fail to light up. Yet the same BIOS boots up with
2372 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2373 * max, not what it tells us to use.
2374 *
2375 * Note: This will still be broken if the eDP panel is not lit
2376 * up by the BIOS, and thus we can't get the mode at module
2377 * load.
2378 */
2379 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2380 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2381 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2382 }
045ac3b5
JB
2383}
2384
e8cb4558 2385static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2386{
e8cb4558 2387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2388 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2389 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2390
6e3c9717 2391 if (crtc->config->has_audio)
495a5bb8 2392 intel_audio_codec_disable(encoder);
6cb49835 2393
b32c6f48
RV
2394 if (HAS_PSR(dev) && !HAS_DDI(dev))
2395 intel_psr_disable(intel_dp);
2396
6cb49835
DV
2397 /* Make sure the panel is off before trying to change the mode. But also
2398 * ensure that we have vdd while we switch off the panel. */
24f3e092 2399 intel_edp_panel_vdd_on(intel_dp);
4be73780 2400 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2401 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2402 intel_edp_panel_off(intel_dp);
3739850b 2403
08aff3fe
VS
2404 /* disable the port before the pipe on g4x */
2405 if (INTEL_INFO(dev)->gen < 5)
3739850b 2406 intel_dp_link_down(intel_dp);
d240f20f
JB
2407}
2408
08aff3fe 2409static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2410{
2bd2ad64 2411 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2412 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2413
49277c31 2414 intel_dp_link_down(intel_dp);
08aff3fe
VS
2415 if (port == PORT_A)
2416 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2417}
2418
2419static void vlv_post_disable_dp(struct intel_encoder *encoder)
2420{
2421 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2422
2423 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2424}
2425
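/*
 * Assert or deassert the CHV data lane and PCS clock soft resets for this
 * port; the second PCS channel is only touched when more than two lanes
 * are in use.
 */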
a8f327fb
VS
2426static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2427 bool reset)
580d3811 2428{
a8f327fb
VS
2429 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2430 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2431 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2432 enum pipe pipe = crtc->pipe;
2433 uint32_t val;
580d3811 2434
a8f327fb
VS
2435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2436 if (reset)
2437 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2438 else
2439 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2440 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2441
a8f327fb
VS
2442 if (crtc->config->lane_count > 2) {
2443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2444 if (reset)
2445 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2446 else
2447 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2448 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2449 }
580d3811 2450
97fd4d5c 2451 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2452 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2453 if (reset)
2454 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2455 else
2456 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2457 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2458
a8f327fb 2459 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2460 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2461 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2462 if (reset)
2463 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2464 else
2465 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2466 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2467 }
a8f327fb 2468}
97fd4d5c 2469
a8f327fb
VS
2470static void chv_post_disable_dp(struct intel_encoder *encoder)
2471{
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2475
a8f327fb
VS
2476 intel_dp_link_down(intel_dp);
2477
2478 mutex_lock(&dev_priv->sb_lock);
2479
2480 /* Assert data lane reset */
2481 chv_data_lane_soft_reset(encoder, true);
580d3811 2482
a580516d 2483 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2484}
2485
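/*
 * Translate the requested DPCD training pattern into the platform-specific
 * register encoding: DP_TP_CTL on DDI platforms, the CPT-style
 * DP_LINK_TRAIN_*_CPT bits for gen7 port A and CPT PCH ports, and the
 * g4x/VLV/CHV DP_LINK_TRAIN_* bits otherwise.
 */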
7b13b58a
VS
2486static void
2487_intel_dp_set_link_train(struct intel_dp *intel_dp,
2488 uint32_t *DP,
2489 uint8_t dp_train_pat)
2490{
2491 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2492 struct drm_device *dev = intel_dig_port->base.base.dev;
2493 struct drm_i915_private *dev_priv = dev->dev_private;
2494 enum port port = intel_dig_port->port;
2495
2496 if (HAS_DDI(dev)) {
2497 uint32_t temp = I915_READ(DP_TP_CTL(port));
2498
2499 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2500 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2501 else
2502 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2503
2504 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2505 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2506 case DP_TRAINING_PATTERN_DISABLE:
2507 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2508
2509 break;
2510 case DP_TRAINING_PATTERN_1:
2511 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2512 break;
2513 case DP_TRAINING_PATTERN_2:
2514 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2515 break;
2516 case DP_TRAINING_PATTERN_3:
2517 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2518 break;
2519 }
2520 I915_WRITE(DP_TP_CTL(port), temp);
2521
39e5fa88
VS
2522 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2523 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2524 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2525
2526 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2527 case DP_TRAINING_PATTERN_DISABLE:
2528 *DP |= DP_LINK_TRAIN_OFF_CPT;
2529 break;
2530 case DP_TRAINING_PATTERN_1:
2531 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2532 break;
2533 case DP_TRAINING_PATTERN_2:
2534 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2535 break;
2536 case DP_TRAINING_PATTERN_3:
2537 DRM_ERROR("DP training pattern 3 not supported\n");
2538 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2539 break;
2540 }
2541
2542 } else {
2543 if (IS_CHERRYVIEW(dev))
2544 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2545 else
2546 *DP &= ~DP_LINK_TRAIN_MASK;
2547
2548 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2549 case DP_TRAINING_PATTERN_DISABLE:
2550 *DP |= DP_LINK_TRAIN_OFF;
2551 break;
2552 case DP_TRAINING_PATTERN_1:
2553 *DP |= DP_LINK_TRAIN_PAT_1;
2554 break;
2555 case DP_TRAINING_PATTERN_2:
2556 *DP |= DP_LINK_TRAIN_PAT_2;
2557 break;
2558 case DP_TRAINING_PATTERN_3:
2559 if (IS_CHERRYVIEW(dev)) {
2560 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2561 } else {
2562 DRM_ERROR("DP training pattern 3 not supported\n");
2563 *DP |= DP_LINK_TRAIN_PAT_2;
2564 }
2565 break;
2566 }
2567 }
2568}
2569
2570static void intel_dp_enable_port(struct intel_dp *intel_dp)
2571{
2572 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2573 struct drm_i915_private *dev_priv = dev->dev_private;
2574
7b13b58a
VS
2575 /* enable with pattern 1 (as per spec) */
2576 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2577 DP_TRAINING_PATTERN_1);
2578
2579 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2580 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2581
2582 /*
2583 * Magic for VLV/CHV. We _must_ first set up the register
2584 * without actually enabling the port, and then do another
2585 * write to enable the port. Otherwise link training will
2586 * fail when the power sequencer is freshly used for this port.
2587 */
2588 intel_dp->DP |= DP_PORT_EN;
2589
2590 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2591 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2592}
2593
e8cb4558 2594static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2595{
e8cb4558
DV
2596 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2597 struct drm_device *dev = encoder->base.dev;
2598 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2599 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2600 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2601 enum port port = dp_to_dig_port(intel_dp)->port;
2602 enum pipe pipe = crtc->pipe;
5d613501 2603
0c33d8d7
DV
2604 if (WARN_ON(dp_reg & DP_PORT_EN))
2605 return;
5d613501 2606
093e3f13
VS
2607 pps_lock(intel_dp);
2608
2609 if (IS_VALLEYVIEW(dev))
2610 vlv_init_panel_power_sequencer(intel_dp);
2611
7b13b58a 2612 intel_dp_enable_port(intel_dp);
093e3f13 2613
d6fbdd15
VS
2614 if (port == PORT_A && IS_GEN5(dev_priv)) {
2615 /*
2616 * Underrun reporting for the other pipe was disabled in
2617 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2618 * enabled, so it's now safe to re-enable underrun reporting.
2619 */
2620 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2621 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2622 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2623 }
2624
093e3f13
VS
2625 edp_panel_vdd_on(intel_dp);
2626 edp_panel_on(intel_dp);
2627 edp_panel_vdd_off(intel_dp, true);
2628
2629 pps_unlock(intel_dp);
2630
e0fce78f
VS
2631 if (IS_VALLEYVIEW(dev)) {
2632 unsigned int lane_mask = 0x0;
2633
2634 if (IS_CHERRYVIEW(dev))
2635 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2636
9b6de0a1
VS
2637 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2638 lane_mask);
e0fce78f 2639 }
61234fa5 2640
f01eca2e 2641 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2642 intel_dp_start_link_train(intel_dp);
3ab9c637 2643 intel_dp_stop_link_train(intel_dp);
c1dec79a 2644
6e3c9717 2645 if (crtc->config->has_audio) {
c1dec79a 2646 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2647 pipe_name(pipe));
c1dec79a
JN
2648 intel_audio_codec_enable(encoder);
2649 }
ab1f90f9 2650}
89b667f8 2651
ecff4f3b
JN
2652static void g4x_enable_dp(struct intel_encoder *encoder)
2653{
828f5c6e
JN
2654 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655
ecff4f3b 2656 intel_enable_dp(encoder);
4be73780 2657 intel_edp_backlight_on(intel_dp);
ab1f90f9 2658}
89b667f8 2659
ab1f90f9
JN
2660static void vlv_enable_dp(struct intel_encoder *encoder)
2661{
828f5c6e
JN
2662 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2663
4be73780 2664 intel_edp_backlight_on(intel_dp);
b32c6f48 2665 intel_psr_enable(intel_dp);
d240f20f
JB
2666}
2667
ecff4f3b 2668static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2669{
d6fbdd15 2670 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2671 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2672 enum port port = dp_to_dig_port(intel_dp)->port;
2673 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2674
8ac33ed3
DV
2675 intel_dp_prepare(encoder);
2676
d6fbdd15
VS
2677 if (port == PORT_A && IS_GEN5(dev_priv)) {
2678 /*
2679 * We get FIFO underruns on the other pipe when
2680 * enabling the CPU eDP PLL, and when enabling CPU
2681 * eDP port. We could potentially avoid the PLL
2682 * underrun with a vblank wait just prior to enabling
2683 * the PLL, but that doesn't appear to help the port
2684 * enable case. Just sweep it all under the rug.
2685 */
2686 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2687 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2688 }
2689
d41f1efb 2690 /* Only ilk+ has port A */
d6fbdd15 2691 if (port == PORT_A) {
d41f1efb 2692 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2693 ironlake_edp_pll_on(intel_dp);
d41f1efb 2694 }
ab1f90f9
JN
2695}
2696
83b84597
VS
2697static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2698{
2699 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2700 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2701 enum pipe pipe = intel_dp->pps_pipe;
2702 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2703
2704 edp_panel_vdd_off_sync(intel_dp);
2705
2706 /*
 2707 * VLV seems to get confused when multiple power sequencers
 2708 * have the same port selected (even if only one has power/vdd
 2709 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2710 * CHV, on the other hand, doesn't seem to mind having the same port
 2711 * selected in multiple power sequencers, but let's always clear the
 2712 * port select when logically disconnecting a power sequencer
 2713 * from a port.
2714 */
2715 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2716 pipe_name(pipe), port_name(intel_dig_port->port));
2717 I915_WRITE(pp_on_reg, 0);
2718 POSTING_READ(pp_on_reg);
2719
2720 intel_dp->pps_pipe = INVALID_PIPE;
2721}
2722
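/*
 * Make sure no other eDP port still claims the power sequencer of the
 * given pipe; any such port gets its VDD switched off and the sequencer
 * detached before we take it over.
 */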
a4a5d2f8
VS
2723static void vlv_steal_power_sequencer(struct drm_device *dev,
2724 enum pipe pipe)
2725{
2726 struct drm_i915_private *dev_priv = dev->dev_private;
2727 struct intel_encoder *encoder;
2728
2729 lockdep_assert_held(&dev_priv->pps_mutex);
2730
ac3c12e4
VS
2731 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2732 return;
2733
a4a5d2f8
VS
2734 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2735 base.head) {
2736 struct intel_dp *intel_dp;
773538e8 2737 enum port port;
a4a5d2f8
VS
2738
2739 if (encoder->type != INTEL_OUTPUT_EDP)
2740 continue;
2741
2742 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2743 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2744
2745 if (intel_dp->pps_pipe != pipe)
2746 continue;
2747
2748 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2749 pipe_name(pipe), port_name(port));
a4a5d2f8 2750
e02f9a06 2751 WARN(encoder->base.crtc,
034e43c6
VS
2752 "stealing pipe %c power sequencer from active eDP port %c\n",
2753 pipe_name(pipe), port_name(port));
a4a5d2f8 2754
a4a5d2f8 2755 /* make sure vdd is off before we steal it */
83b84597 2756 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2757 }
2758}
2759
2760static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2761{
2762 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2763 struct intel_encoder *encoder = &intel_dig_port->base;
2764 struct drm_device *dev = encoder->base.dev;
2765 struct drm_i915_private *dev_priv = dev->dev_private;
2766 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2767
2768 lockdep_assert_held(&dev_priv->pps_mutex);
2769
093e3f13
VS
2770 if (!is_edp(intel_dp))
2771 return;
2772
a4a5d2f8
VS
2773 if (intel_dp->pps_pipe == crtc->pipe)
2774 return;
2775
2776 /*
2777 * If another power sequencer was being used on this
 2778 * port previously, make sure to turn off vdd there while
2779 * we still have control of it.
2780 */
2781 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2782 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2783
2784 /*
2785 * We may be stealing the power
2786 * sequencer from another port.
2787 */
2788 vlv_steal_power_sequencer(dev, crtc->pipe);
2789
2790 /* now it's all ours */
2791 intel_dp->pps_pipe = crtc->pipe;
2792
2793 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2794 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2795
2796 /* init power sequencer on this pipe and port */
36b5f425
VS
2797 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2798 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2799}
2800
ab1f90f9 2801static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2802{
2bd2ad64 2803 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2804 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2805 struct drm_device *dev = encoder->base.dev;
89b667f8 2806 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2807 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2808 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2809 int pipe = intel_crtc->pipe;
2810 u32 val;
a4fc5ed6 2811
a580516d 2812 mutex_lock(&dev_priv->sb_lock);
89b667f8 2813
ab3c759a 2814 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2815 val = 0;
2816 if (pipe)
2817 val |= (1<<21);
2818 else
2819 val &= ~(1<<21);
2820 val |= 0x001000c4;
ab3c759a
CML
2821 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2822 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2823 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2824
a580516d 2825 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2826
2827 intel_enable_dp(encoder);
89b667f8
JB
2828}
2829
ecff4f3b 2830static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2831{
2832 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2833 struct drm_device *dev = encoder->base.dev;
2834 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2835 struct intel_crtc *intel_crtc =
2836 to_intel_crtc(encoder->base.crtc);
e4607fcf 2837 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2838 int pipe = intel_crtc->pipe;
89b667f8 2839
8ac33ed3
DV
2840 intel_dp_prepare(encoder);
2841
89b667f8 2842 /* Program Tx lane resets to default */
a580516d 2843 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2844 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2845 DPIO_PCS_TX_LANE2_RESET |
2846 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2847 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2848 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2849 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2850 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2851 DPIO_PCS_CLK_SOFT_RESET);
2852
2853 /* Fix up inter-pair skew failure */
ab3c759a
CML
2854 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2855 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2856 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2857 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2858}
2859
e4a1d846
CML
2860static void chv_pre_enable_dp(struct intel_encoder *encoder)
2861{
2862 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2863 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2864 struct drm_device *dev = encoder->base.dev;
2865 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2866 struct intel_crtc *intel_crtc =
2867 to_intel_crtc(encoder->base.crtc);
2868 enum dpio_channel ch = vlv_dport_to_channel(dport);
2869 int pipe = intel_crtc->pipe;
2e523e98 2870 int data, i, stagger;
949c1d43 2871 u32 val;
e4a1d846 2872
a580516d 2873 mutex_lock(&dev_priv->sb_lock);
949c1d43 2874
570e2a74
VS
2875 /* allow hardware to manage TX FIFO reset source */
2876 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2877 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2878 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2879
e0fce78f
VS
2880 if (intel_crtc->config->lane_count > 2) {
2881 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2882 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2883 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2884 }
570e2a74 2885
949c1d43 2886 /* Program Tx lane latency optimal setting*/
e0fce78f 2887 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 2888 /* Set the upar bit */
e0fce78f
VS
2889 if (intel_crtc->config->lane_count == 1)
2890 data = 0x0;
2891 else
2892 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
2893 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2894 data << DPIO_UPAR_SHIFT);
2895 }
2896
2897 /* Data lane stagger programming */
2e523e98
VS
2898 if (intel_crtc->config->port_clock > 270000)
2899 stagger = 0x18;
2900 else if (intel_crtc->config->port_clock > 135000)
2901 stagger = 0xd;
2902 else if (intel_crtc->config->port_clock > 67500)
2903 stagger = 0x7;
2904 else if (intel_crtc->config->port_clock > 33750)
2905 stagger = 0x4;
2906 else
2907 stagger = 0x2;
2908
2909 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2910 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2911 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2912
e0fce78f
VS
2913 if (intel_crtc->config->lane_count > 2) {
2914 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2915 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2916 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2917 }
2e523e98
VS
2918
2919 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2920 DPIO_LANESTAGGER_STRAP(stagger) |
2921 DPIO_LANESTAGGER_STRAP_OVRD |
2922 DPIO_TX1_STAGGER_MASK(0x1f) |
2923 DPIO_TX1_STAGGER_MULT(6) |
2924 DPIO_TX2_STAGGER_MULT(0));
2925
e0fce78f
VS
2926 if (intel_crtc->config->lane_count > 2) {
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2928 DPIO_LANESTAGGER_STRAP(stagger) |
2929 DPIO_LANESTAGGER_STRAP_OVRD |
2930 DPIO_TX1_STAGGER_MASK(0x1f) |
2931 DPIO_TX1_STAGGER_MULT(7) |
2932 DPIO_TX2_STAGGER_MULT(5));
2933 }
e4a1d846 2934
a8f327fb
VS
2935 /* Deassert data lane reset */
2936 chv_data_lane_soft_reset(encoder, false);
2937
a580516d 2938 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2939
e4a1d846 2940 intel_enable_dp(encoder);
b0b33846
VS
2941
2942 /* Second common lane will stay alive on its own now */
2943 if (dport->release_cl2_override) {
2944 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2945 dport->release_cl2_override = false;
2946 }
e4a1d846
CML
2947}
2948
9197c88b
VS
2949static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2950{
2951 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2952 struct drm_device *dev = encoder->base.dev;
2953 struct drm_i915_private *dev_priv = dev->dev_private;
2954 struct intel_crtc *intel_crtc =
2955 to_intel_crtc(encoder->base.crtc);
2956 enum dpio_channel ch = vlv_dport_to_channel(dport);
2957 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
2958 unsigned int lane_mask =
2959 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
2960 u32 val;
2961
625695f8
VS
2962 intel_dp_prepare(encoder);
2963
b0b33846
VS
2964 /*
2965 * Must trick the second common lane into life.
2966 * Otherwise we can't even access the PLL.
2967 */
2968 if (ch == DPIO_CH0 && pipe == PIPE_B)
2969 dport->release_cl2_override =
2970 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2971
e0fce78f
VS
2972 chv_phy_powergate_lanes(encoder, true, lane_mask);
2973
a580516d 2974 mutex_lock(&dev_priv->sb_lock);
9197c88b 2975
a8f327fb
VS
2976 /* Assert data lane reset */
2977 chv_data_lane_soft_reset(encoder, true);
2978
b9e5ac3c
VS
2979 /* program left/right clock distribution */
2980 if (pipe != PIPE_B) {
2981 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2982 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2983 if (ch == DPIO_CH0)
2984 val |= CHV_BUFLEFTENA1_FORCE;
2985 if (ch == DPIO_CH1)
2986 val |= CHV_BUFRIGHTENA1_FORCE;
2987 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2988 } else {
2989 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2990 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2991 if (ch == DPIO_CH0)
2992 val |= CHV_BUFLEFTENA2_FORCE;
2993 if (ch == DPIO_CH1)
2994 val |= CHV_BUFRIGHTENA2_FORCE;
2995 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2996 }
2997
9197c88b
VS
2998 /* program clock channel usage */
2999 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3000 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3001 if (pipe != PIPE_B)
3002 val &= ~CHV_PCS_USEDCLKCHANNEL;
3003 else
3004 val |= CHV_PCS_USEDCLKCHANNEL;
3005 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3006
e0fce78f
VS
3007 if (intel_crtc->config->lane_count > 2) {
3008 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3009 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3010 if (pipe != PIPE_B)
3011 val &= ~CHV_PCS_USEDCLKCHANNEL;
3012 else
3013 val |= CHV_PCS_USEDCLKCHANNEL;
3014 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3015 }
9197c88b
VS
3016
3017 /*
 3018 * This is a bit weird since generally the CL
3019 * matches the pipe, but here we need to
3020 * pick the CL based on the port.
3021 */
3022 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3023 if (pipe != PIPE_B)
3024 val &= ~CHV_CMN_USEDCLKCHANNEL;
3025 else
3026 val |= CHV_CMN_USEDCLKCHANNEL;
3027 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3028
a580516d 3029 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3030}
3031
d6db995f
VS
3032static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3033{
3034 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3035 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3036 u32 val;
3037
3038 mutex_lock(&dev_priv->sb_lock);
3039
3040 /* disable left/right clock distribution */
3041 if (pipe != PIPE_B) {
3042 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3043 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3044 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3045 } else {
3046 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3047 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3048 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3049 }
3050
3051 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3052
b0b33846
VS
3053 /*
3054 * Leave the power down bit cleared for at least one
3055 * lane so that chv_powergate_phy_ch() will power
3056 * on something when the channel is otherwise unused.
3057 * When the port is off and the override is removed
 3058 * the lanes power down anyway, so it doesn't
 3059 * really matter what the state of the power down bits is
3060 * after this.
3061 */
e0fce78f 3062 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3063}
3064
a4fc5ed6 3065/*
df0c237d
JB
3066 * Native read with retry for link status and receiver capability reads for
3067 * cases where the sink may still be asleep.
9d1a1031
JN
3068 *
3069 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3070 * supposed to retry 3 times per the spec.
a4fc5ed6 3071 */
9d1a1031
JN
3072static ssize_t
3073intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3074 void *buffer, size_t size)
a4fc5ed6 3075{
9d1a1031
JN
3076 ssize_t ret;
3077 int i;
61da5fab 3078
f6a19066
VS
3079 /*
3080 * Sometime we just get the same incorrect byte repeated
3081 * over the entire buffer. Doing just one throw away read
3082 * initially seems to "solve" it.
3083 */
3084 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3085
61da5fab 3086 for (i = 0; i < 3; i++) {
9d1a1031
JN
3087 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3088 if (ret == size)
3089 return ret;
61da5fab
JB
3090 msleep(1);
3091 }
a4fc5ed6 3092
9d1a1031 3093 return ret;
a4fc5ed6
KP
3094}
3095
3096/*
3097 * Fetch AUX CH registers 0x202 - 0x207 which contain
3098 * link status information
3099 */
94223d04 3100bool
93f62dad 3101intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3102{
9d1a1031
JN
3103 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3104 DP_LANE0_1_STATUS,
3105 link_status,
3106 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3107}
3108
1100244e 3109/* These are source-specific values. */
94223d04 3110uint8_t
1a2eb460 3111intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3112{
30add22d 3113 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3114 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3115 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3116
9314726b
VK
3117 if (IS_BROXTON(dev))
3118 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3119 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3120 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3121 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3122 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 3123 } else if (IS_VALLEYVIEW(dev))
bd60018a 3124 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3125 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3126 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3127 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3128 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3129 else
bd60018a 3130 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3131}
3132
94223d04 3133uint8_t
1a2eb460
KP
3134intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3135{
30add22d 3136 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3137 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3138
5a9d1f1a
DL
3139 if (INTEL_INFO(dev)->gen >= 9) {
3140 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3144 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3146 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3148 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3149 default:
3150 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3151 }
3152 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3153 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3155 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3156 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3157 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3159 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3161 default:
bd60018a 3162 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3163 }
e2fa6fba
P
3164 } else if (IS_VALLEYVIEW(dev)) {
3165 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3167 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3169 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3171 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3173 default:
bd60018a 3174 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3175 }
bc7d38a4 3176 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3177 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3179 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3180 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3181 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3182 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3183 default:
bd60018a 3184 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3185 }
3186 } else {
3187 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3189 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3191 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3193 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3195 default:
bd60018a 3196 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3197 }
a4fc5ed6
KP
3198 }
3199}
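/*
 * Illustrative sketch only (the exact clamping lives in the link-training
 * code, and requested_vswing/requested_preemph below are placeholder names
 * for the per-lane levels requested by the sink): the two helpers above are
 * typically used to cap the sink's request and flag when the maximum has
 * been reached, e.g.
 *
 *	uint8_t vmax = intel_dp_voltage_max(intel_dp);
 *	uint8_t v = min(requested_vswing, vmax);
 *	uint8_t pmax = intel_dp_pre_emphasis_max(intel_dp, v);
 *	uint8_t p = min(requested_preemph, pmax);
 *	uint8_t train_set = v | p;
 *
 *	if (v == vmax)
 *		train_set |= DP_TRAIN_MAX_SWING_REACHED;
 *	if (p == pmax)
 *		train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 */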
3200
5829975c 3201static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3202{
3203 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3204 struct drm_i915_private *dev_priv = dev->dev_private;
3205 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3206 struct intel_crtc *intel_crtc =
3207 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3208 unsigned long demph_reg_value, preemph_reg_value,
3209 uniqtranscale_reg_value;
3210 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3211 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3212 int pipe = intel_crtc->pipe;
e2fa6fba
P
3213
3214 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3215 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3216 preemph_reg_value = 0x0004000;
3217 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3218 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3219 demph_reg_value = 0x2B405555;
3220 uniqtranscale_reg_value = 0x552AB83A;
3221 break;
bd60018a 3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3223 demph_reg_value = 0x2B404040;
3224 uniqtranscale_reg_value = 0x5548B83A;
3225 break;
bd60018a 3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3227 demph_reg_value = 0x2B245555;
3228 uniqtranscale_reg_value = 0x5560B83A;
3229 break;
bd60018a 3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3231 demph_reg_value = 0x2B405555;
3232 uniqtranscale_reg_value = 0x5598DA3A;
3233 break;
3234 default:
3235 return 0;
3236 }
3237 break;
bd60018a 3238 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3239 preemph_reg_value = 0x0002000;
3240 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3242 demph_reg_value = 0x2B404040;
3243 uniqtranscale_reg_value = 0x5552B83A;
3244 break;
bd60018a 3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3246 demph_reg_value = 0x2B404848;
3247 uniqtranscale_reg_value = 0x5580B83A;
3248 break;
bd60018a 3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3250 demph_reg_value = 0x2B404040;
3251 uniqtranscale_reg_value = 0x55ADDA3A;
3252 break;
3253 default:
3254 return 0;
3255 }
3256 break;
bd60018a 3257 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3258 preemph_reg_value = 0x0000000;
3259 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3261 demph_reg_value = 0x2B305555;
3262 uniqtranscale_reg_value = 0x5570B83A;
3263 break;
bd60018a 3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3265 demph_reg_value = 0x2B2B4040;
3266 uniqtranscale_reg_value = 0x55ADDA3A;
3267 break;
3268 default:
3269 return 0;
3270 }
3271 break;
bd60018a 3272 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3273 preemph_reg_value = 0x0006000;
3274 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3276 demph_reg_value = 0x1B405555;
3277 uniqtranscale_reg_value = 0x55ADDA3A;
3278 break;
3279 default:
3280 return 0;
3281 }
3282 break;
3283 default:
3284 return 0;
3285 }
3286
a580516d 3287 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3288 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3289 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3290 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3291 uniqtranscale_reg_value);
ab3c759a
CML
3292 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3293 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3294 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3295 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3296 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3297
3298 return 0;
3299}
3300
67fa24b4
VS
3301static bool chv_need_uniq_trans_scale(uint8_t train_set)
3302{
3303 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3304 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3305}
3306
5829975c 3307static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3308{
3309 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3310 struct drm_i915_private *dev_priv = dev->dev_private;
3311 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3312 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3313 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3314 uint8_t train_set = intel_dp->train_set[0];
3315 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3316 enum pipe pipe = intel_crtc->pipe;
3317 int i;
e4a1d846
CML
3318
3319 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3320 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3321 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3323 deemph_reg_value = 128;
3324 margin_reg_value = 52;
3325 break;
bd60018a 3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3327 deemph_reg_value = 128;
3328 margin_reg_value = 77;
3329 break;
bd60018a 3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3331 deemph_reg_value = 128;
3332 margin_reg_value = 102;
3333 break;
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3335 deemph_reg_value = 128;
3336 margin_reg_value = 154;
3337 /* FIXME extra to set for 1200 */
3338 break;
3339 default:
3340 return 0;
3341 }
3342 break;
bd60018a 3343 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3344 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3346 deemph_reg_value = 85;
3347 margin_reg_value = 78;
3348 break;
bd60018a 3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3350 deemph_reg_value = 85;
3351 margin_reg_value = 116;
3352 break;
bd60018a 3353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3354 deemph_reg_value = 85;
3355 margin_reg_value = 154;
3356 break;
3357 default:
3358 return 0;
3359 }
3360 break;
bd60018a 3361 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3362 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3364 deemph_reg_value = 64;
3365 margin_reg_value = 104;
3366 break;
bd60018a 3367 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3368 deemph_reg_value = 64;
3369 margin_reg_value = 154;
3370 break;
3371 default:
3372 return 0;
3373 }
3374 break;
bd60018a 3375 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3376 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3378 deemph_reg_value = 43;
3379 margin_reg_value = 154;
3380 break;
3381 default:
3382 return 0;
3383 }
3384 break;
3385 default:
3386 return 0;
3387 }
3388
a580516d 3389 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3390
3391 /* Clear calc init */
1966e59e
VS
3392 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3393 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3394 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3395 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3396 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3397
e0fce78f
VS
3398 if (intel_crtc->config->lane_count > 2) {
3399 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3400 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3401 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3402 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3403 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3404 }
e4a1d846 3405
a02ef3c7
VS
3406 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3407 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3408 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3409 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3410
e0fce78f
VS
3411 if (intel_crtc->config->lane_count > 2) {
3412 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3413 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3414 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3415 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3416 }
a02ef3c7 3417
e4a1d846 3418 /* Program swing deemph */
e0fce78f 3419 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3420 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3421 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3422 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3423 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3424 }
e4a1d846
CML
3425
3426 /* Program swing margin */
e0fce78f 3427 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3428 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3429
1fb44505
VS
3430 val &= ~DPIO_SWING_MARGIN000_MASK;
3431 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3432
3433 /*
3434 * Supposedly this value shouldn't matter when unique transition
3435 * scale is disabled, but in fact it does matter. Let's just
3436 * always program the same value and hope it's OK.
3437 */
3438 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3439 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3440
f72df8db
VS
3441 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3442 }
e4a1d846 3443
67fa24b4
VS
3444 /*
3445 * The documentation says to set bit 27 for ch0 and bit 26
3446 * for ch1, which might be a typo in the doc.
3447 * For now, for this unique transition scale selection, set bit
3448 * 27 for both ch0 and ch1.
3449 */
e0fce78f 3450 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3451 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3452 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3453 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3454 else
3455 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3456 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3457 }
3458
3459 /* Start swing calculation */
1966e59e
VS
3460 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3461 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3462 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3463
e0fce78f
VS
3464 if (intel_crtc->config->lane_count > 2) {
3465 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3466 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3467 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3468 }
e4a1d846 3469
a580516d 3470 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3471
3472 return 0;
3473}
3474
a4fc5ed6 3475static uint32_t
5829975c 3476gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3477{
3cf2efb1 3478 uint32_t signal_levels = 0;
a4fc5ed6 3479
3cf2efb1 3480 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3482 default:
3483 signal_levels |= DP_VOLTAGE_0_4;
3484 break;
bd60018a 3485 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3486 signal_levels |= DP_VOLTAGE_0_6;
3487 break;
bd60018a 3488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3489 signal_levels |= DP_VOLTAGE_0_8;
3490 break;
bd60018a 3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3492 signal_levels |= DP_VOLTAGE_1_2;
3493 break;
3494 }
3cf2efb1 3495 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3496 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3497 default:
3498 signal_levels |= DP_PRE_EMPHASIS_0;
3499 break;
bd60018a 3500 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3501 signal_levels |= DP_PRE_EMPHASIS_3_5;
3502 break;
bd60018a 3503 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3504 signal_levels |= DP_PRE_EMPHASIS_6;
3505 break;
bd60018a 3506 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3507 signal_levels |= DP_PRE_EMPHASIS_9_5;
3508 break;
3509 }
3510 return signal_levels;
3511}
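/*
 * Example: with the standard DPCD TRAINING_LANEx_SET layout (voltage swing
 * in bits 1:0, pre-emphasis in bits 4:3), a train_set of 0x09 requests
 * swing level 1 with pre-emphasis level 1, which the switches above
 * translate to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5.
 */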
3512
e3421a18
ZW
3513/* Gen6's DP voltage swing and pre-emphasis control */
3514static uint32_t
5829975c 3515gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3516{
3c5a62b5
YL
3517 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3518 DP_TRAIN_PRE_EMPHASIS_MASK);
3519 switch (signal_levels) {
bd60018a
SJ
3520 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3521 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3522 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3523 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3524 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3525 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3526 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3527 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3528 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3529 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3530 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3531 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3532 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3533 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3534 default:
3c5a62b5
YL
3535 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3536 "0x%x\n", signal_levels);
3537 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3538 }
3539}
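/*
 * Note that several requested combinations share one SNB register value
 * (e.g. 400mV/0dB and 600mV/0dB both select
 * EDP_LINK_TRAIN_400_600MV_0DB_SNB_B); anything not listed falls back to
 * that value with a debug message.
 */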
3540
1a2eb460
KP
3541/* Gen7's DP voltage swing and pre-emphasis control */
3542static uint32_t
5829975c 3543gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3544{
3545 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3546 DP_TRAIN_PRE_EMPHASIS_MASK);
3547 switch (signal_levels) {
bd60018a 3548 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3549 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3550 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3551 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3552 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3553 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3554
bd60018a 3555 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3556 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3557 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3558 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3559
bd60018a 3560 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3561 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3562 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3563 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3564
3565 default:
3566 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3567 "0x%x\n", signal_levels);
3568 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3569 }
3570}
3571
94223d04 3572void
f4eb692e 3573intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3574{
3575 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3576 enum port port = intel_dig_port->port;
f0a3424e 3577 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3578 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3579 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3580 uint8_t train_set = intel_dp->train_set[0];
3581
f8896f5d
DW
3582 if (HAS_DDI(dev)) {
3583 signal_levels = ddi_signal_levels(intel_dp);
3584
3585 if (IS_BROXTON(dev))
3586 signal_levels = 0;
3587 else
3588 mask = DDI_BUF_EMP_MASK;
e4a1d846 3589 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3590 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3591 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3592 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3593 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3594 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3595 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3596 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3597 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3598 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3599 } else {
5829975c 3600 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3601 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3602 }
3603
96fb9f9b
VK
3604 if (mask)
3605 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3606
3607 DRM_DEBUG_KMS("Using vswing level %d\n",
3608 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3609 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3610 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3611 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3612
f4eb692e 3613 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3614
3615 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3616 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3617}
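/*
 * Note on the read-modify-write above: only the bits covered by mask are
 * replaced in intel_dp->DP, so on Broxton (where both mask and
 * signal_levels are left at 0) the port register is written back unchanged
 * and the vswing/pre-emphasis programming is handled by the DDI code
 * (ddi_signal_levels()) instead.
 */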
3618
94223d04 3619void
e9c176d5
ACO
3620intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3621 uint8_t dp_train_pat)
a4fc5ed6 3622{
174edf1f 3623 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3624 struct drm_i915_private *dev_priv =
3625 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3626
f4eb692e 3627 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3628
f4eb692e 3629 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3630 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3631}
3632
94223d04 3633void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3634{
3635 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3636 struct drm_device *dev = intel_dig_port->base.base.dev;
3637 struct drm_i915_private *dev_priv = dev->dev_private;
3638 enum port port = intel_dig_port->port;
3639 uint32_t val;
3640
3641 if (!HAS_DDI(dev))
3642 return;
3643
3644 val = I915_READ(DP_TP_CTL(port));
3645 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3646 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3647 I915_WRITE(DP_TP_CTL(port), val);
3648
3649 /*
3650 * On PORT_A we can have only eDP in SST mode. There, the only reason
3651 * we need to set idle transmission mode is to work around a HW issue
3652 * where we enable the pipe while not in idle link-training mode.
3653 * In this case there is a requirement to wait for a minimum number of
3654 * idle patterns to be sent.
3655 */
3656 if (port == PORT_A)
3657 return;
3658
3659 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3660 1))
3661 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3662}
3663
a4fc5ed6 3664static void
ea5b213a 3665intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3666{
da63a9f2 3667 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3668 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3669 enum port port = intel_dig_port->port;
da63a9f2 3670 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3671 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3672 uint32_t DP = intel_dp->DP;
a4fc5ed6 3673
bc76e320 3674 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3675 return;
3676
0c33d8d7 3677 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3678 return;
3679
28c97730 3680 DRM_DEBUG_KMS("\n");
32f9d658 3681
39e5fa88
VS
3682 if ((IS_GEN7(dev) && port == PORT_A) ||
3683 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3684 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3685 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3686 } else {
aad3d14d
VS
3687 if (IS_CHERRYVIEW(dev))
3688 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3689 else
3690 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3691 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3692 }
1612c8bd 3693 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3694 POSTING_READ(intel_dp->output_reg);
5eb08b69 3695
1612c8bd
VS
3696 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3697 I915_WRITE(intel_dp->output_reg, DP);
3698 POSTING_READ(intel_dp->output_reg);
3699
3700 /*
3701 * HW workaround for IBX: we need to move the port
3702 * to transcoder A after disabling it to allow the
3703 * matching HDMI port to be enabled on transcoder A.
3704 */
3705 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3706 /*
3707 * We get CPU/PCH FIFO underruns on the other pipe when
3708 * doing the workaround. Sweep them under the rug.
3709 */
3710 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3711 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3712
1612c8bd
VS
3713 /* always enable with pattern 1 (as per spec) */
3714 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3715 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3716 I915_WRITE(intel_dp->output_reg, DP);
3717 POSTING_READ(intel_dp->output_reg);
3718
3719 DP &= ~DP_PORT_EN;
5bddd17f 3720 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3721 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3722
3723 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3724 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3725 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3726 }
3727
f01eca2e 3728 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3729}
3730
26d61aad
KP
3731static bool
3732intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3733{
a031d709
RV
3734 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3735 struct drm_device *dev = dig_port->base.base.dev;
3736 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3737 uint8_t rev;
a031d709 3738
9d1a1031
JN
3739 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3740 sizeof(intel_dp->dpcd)) < 0)
edb39244 3741 return false; /* aux transfer failed */
92fd8fd1 3742
a8e98153 3743 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3744
edb39244
AJ
3745 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3746 return false; /* DPCD not present */
3747
2293bb5c
SK
3748 /* Check if the panel supports PSR */
3749 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3750 if (is_edp(intel_dp)) {
9d1a1031
JN
3751 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3752 intel_dp->psr_dpcd,
3753 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3754 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3755 dev_priv->psr.sink_support = true;
50003939 3756 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3757 }
474d1ec4
SJ
3758
3759 if (INTEL_INFO(dev)->gen >= 9 &&
3760 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3761 uint8_t frame_sync_cap;
3762
3763 dev_priv->psr.sink_support = true;
3764 intel_dp_dpcd_read_wake(&intel_dp->aux,
3765 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3766 &frame_sync_cap, 1);
3767 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3768 /* PSR2 needs frame sync as well */
3769 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3770 DRM_DEBUG_KMS("PSR2 %s on sink",
3771 dev_priv->psr.psr2_support ? "supported" : "not supported");
3772 }
50003939
JN
3773 }
3774
bc5133d5 3775 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3776 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3777 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3778
fc0f8e25
SJ
3779 /* Intermediate frequency support */
3780 if (is_edp(intel_dp) &&
3781 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3782 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3783 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3784 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3785 int i;
3786
fc0f8e25
SJ
3787 intel_dp_dpcd_read_wake(&intel_dp->aux,
3788 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3789 sink_rates,
3790 sizeof(sink_rates));
ea2d8a42 3791
94ca719e
VS
3792 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3793 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3794
3795 if (val == 0)
3796 break;
3797
af77b974
SJ
3798 /* Value read is in kHz while drm clock is saved in deca-kHz */
3799 intel_dp->sink_rates[i] = (val * 200) / 10;
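		/* For example, a raw entry of 8100 becomes (8100 * 200) / 10 = 162000 here. */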
ea2d8a42 3800 }
94ca719e 3801 intel_dp->num_sink_rates = i;
fc0f8e25 3802 }
0336400e
VS
3803
3804 intel_dp_print_rates(intel_dp);
3805
edb39244
AJ
3806 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3807 DP_DWN_STRM_PORT_PRESENT))
3808 return true; /* native DP sink */
3809
3810 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3811 return true; /* no per-port downstream info */
3812
9d1a1031
JN
3813 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3814 intel_dp->downstream_ports,
3815 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3816 return false; /* downstream port status fetch failed */
3817
3818 return true;
92fd8fd1
KP
3819}
3820
0d198328
AJ
3821static void
3822intel_dp_probe_oui(struct intel_dp *intel_dp)
3823{
3824 u8 buf[3];
3825
3826 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3827 return;
3828
9d1a1031 3829 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3830 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3831 buf[0], buf[1], buf[2]);
3832
9d1a1031 3833 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3834 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3835 buf[0], buf[1], buf[2]);
3836}
3837
0e32b39c
DA
3838static bool
3839intel_dp_probe_mst(struct intel_dp *intel_dp)
3840{
3841 u8 buf[1];
3842
3843 if (!intel_dp->can_mst)
3844 return false;
3845
3846 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3847 return false;
3848
0e32b39c
DA
3849 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3850 if (buf[0] & DP_MST_CAP) {
3851 DRM_DEBUG_KMS("Sink is MST capable\n");
3852 intel_dp->is_mst = true;
3853 } else {
3854 DRM_DEBUG_KMS("Sink is not MST capable\n");
3855 intel_dp->is_mst = false;
3856 }
3857 }
0e32b39c
DA
3858
3859 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3860 return intel_dp->is_mst;
3861}
3862
e5a1cab5 3863static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3864{
082dcc7c
RV
3865 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3866 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3867 u8 buf;
e5a1cab5 3868 int ret = 0;
d2e216d0 3869
082dcc7c
RV
3870 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3871 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3872 ret = -EIO;
3873 goto out;
4373f0f2
PZ
3874 }
3875
082dcc7c 3876 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 3877 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 3878 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3879 ret = -EIO;
3880 goto out;
3881 }
d2e216d0 3882
621d4c76 3883 intel_dp->sink_crc.started = false;
e5a1cab5 3884 out:
082dcc7c 3885 hsw_enable_ips(intel_crtc);
e5a1cab5 3886 return ret;
082dcc7c
RV
3887}
3888
3889static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3890{
3891 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3892 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3893 u8 buf;
e5a1cab5
RV
3894 int ret;
3895
621d4c76 3896 if (intel_dp->sink_crc.started) {
e5a1cab5
RV
3897 ret = intel_dp_sink_crc_stop(intel_dp);
3898 if (ret)
3899 return ret;
3900 }
082dcc7c
RV
3901
3902 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3903 return -EIO;
3904
3905 if (!(buf & DP_TEST_CRC_SUPPORTED))
3906 return -ENOTTY;
3907
621d4c76
RV
3908 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3909
082dcc7c
RV
3910 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3911 return -EIO;
3912
3913 hsw_disable_ips(intel_crtc);
1dda5f93 3914
9d1a1031 3915 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
3916 buf | DP_TEST_SINK_START) < 0) {
3917 hsw_enable_ips(intel_crtc);
3918 return -EIO;
4373f0f2
PZ
3919 }
3920
621d4c76 3921 intel_dp->sink_crc.started = true;
082dcc7c
RV
3922 return 0;
3923}
3924
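/*
 * Fetch the 6-byte sink CRC for the currently transmitted frame. The loop
 * below waits a vblank per attempt (up to 6 attempts) until TEST_SINK_MISC
 * reports a non-zero frame count and either the count or the CRC differs
 * from the previous sample, guarding against sinks with unreliable
 * counters. Returns 0 on success or a negative error code.
 */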
3925int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3926{
3927 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3928 struct drm_device *dev = dig_port->base.base.dev;
3929 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3930 u8 buf;
621d4c76 3931 int count, ret;
082dcc7c 3932 int attempts = 6;
aabc95dc 3933 bool old_equal_new;
082dcc7c
RV
3934
3935 ret = intel_dp_sink_crc_start(intel_dp);
3936 if (ret)
3937 return ret;
3938
ad9dc91b 3939 do {
621d4c76
RV
3940 intel_wait_for_vblank(dev, intel_crtc->pipe);
3941
1dda5f93 3942 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
3943 DP_TEST_SINK_MISC, &buf) < 0) {
3944 ret = -EIO;
afe0d67e 3945 goto stop;
4373f0f2 3946 }
621d4c76 3947 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 3948
621d4c76
RV
3949 /*
3950 * Count might be reset during the loop. In this case
3951 * the last known count needs to be reset as well.
3952 */
3953 if (count == 0)
3954 intel_dp->sink_crc.last_count = 0;
3955
3956 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3957 ret = -EIO;
3958 goto stop;
3959 }
aabc95dc
RV
3960
3961 old_equal_new = (count == intel_dp->sink_crc.last_count &&
3962 !memcmp(intel_dp->sink_crc.last_crc, crc,
3963 6 * sizeof(u8)));
3964
3965 } while (--attempts && (count == 0 || old_equal_new));
621d4c76
RV
3966
3967 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3968 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
ad9dc91b
RV
3969
3970 if (attempts == 0) {
aabc95dc
RV
3971 if (old_equal_new) {
3972 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
3973 } else {
3974 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3975 ret = -ETIMEDOUT;
3976 goto stop;
3977 }
ad9dc91b 3978 }
d2e216d0 3979
afe0d67e 3980stop:
082dcc7c 3981 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 3982 return ret;
d2e216d0
RV
3983}
3984
a60f0e38
JB
3985static bool
3986intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3987{
9d1a1031
JN
3988 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3989 DP_DEVICE_SERVICE_IRQ_VECTOR,
3990 sink_irq_vector, 1) == 1;
a60f0e38
JB
3991}
3992
0e32b39c
DA
3993static bool
3994intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3995{
3996 int ret;
3997
3998 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3999 DP_SINK_COUNT_ESI,
4000 sink_irq_vector, 14);
4001 if (ret != 14)
4002 return false;
4003
4004 return true;
4005}
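/*
 * The 14-byte ESI read starts at DP_SINK_COUNT_ESI (0x2002), so it also
 * covers the ESI link status bytes beginning at 0x200c; that is why the
 * MST handler below checks channel EQ via &esi[10].
 */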
4006
c5d5ab7a
TP
4007static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4008{
4009 uint8_t test_result = DP_TEST_ACK;
4010 return test_result;
4011}
4012
4013static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4014{
4015 uint8_t test_result = DP_TEST_NAK;
4016 return test_result;
4017}
4018
4019static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4020{
c5d5ab7a 4021 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4022 struct intel_connector *intel_connector = intel_dp->attached_connector;
4023 struct drm_connector *connector = &intel_connector->base;
4024
4025 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4026 connector->edid_corrupt ||
559be30c
TP
4027 intel_dp->aux.i2c_defer_count > 6) {
4028 /* Check EDID read for NACKs, DEFERs and corruption
4029 * (DP CTS 1.2 Core r1.1)
4030 * 4.2.2.4 : Failed EDID read, I2C_NAK
4031 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4032 * 4.2.2.6 : EDID corruption detected
4033 * Use failsafe mode for all cases
4034 */
4035 if (intel_dp->aux.i2c_nack_count > 0 ||
4036 intel_dp->aux.i2c_defer_count > 0)
4037 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4038 intel_dp->aux.i2c_nack_count,
4039 intel_dp->aux.i2c_defer_count);
4040 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4041 } else {
f79b468e
TS
4042 struct edid *block = intel_connector->detect_edid;
4043
4044 /* We have to write the checksum
4045 * of the last block read
4046 */
4047 block += intel_connector->detect_edid->extensions;
4048
559be30c
TP
4049 if (!drm_dp_dpcd_write(&intel_dp->aux,
4050 DP_TEST_EDID_CHECKSUM,
f79b468e 4051 &block->checksum,
5a1cc655 4052 1))
559be30c
TP
4053 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4054
4055 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4056 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4057 }
4058
4059 /* Set test active flag here so userspace doesn't interrupt things */
4060 intel_dp->compliance_test_active = 1;
4061
c5d5ab7a
TP
4062 return test_result;
4063}
4064
4065static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4066{
c5d5ab7a
TP
4067 uint8_t test_result = DP_TEST_NAK;
4068 return test_result;
4069}
4070
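/*
 * Dispatch a DP compliance automated test request: read DP_TEST_REQUEST,
 * run the matching autotest handler above, and write the ACK/NAK result
 * back to DP_TEST_RESPONSE. Only the EDID test does real work here; the
 * other handlers simply ACK or NAK.
 */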
4071static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4072{
4073 uint8_t response = DP_TEST_NAK;
4074 uint8_t rxdata = 0;
4075 int status = 0;
4076
559be30c 4077 intel_dp->compliance_test_active = 0;
c5d5ab7a 4078 intel_dp->compliance_test_type = 0;
559be30c
TP
4079 intel_dp->compliance_test_data = 0;
4080
c5d5ab7a
TP
4081 intel_dp->aux.i2c_nack_count = 0;
4082 intel_dp->aux.i2c_defer_count = 0;
4083
4084 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4085 if (status <= 0) {
4086 DRM_DEBUG_KMS("Could not read test request from sink\n");
4087 goto update_status;
4088 }
4089
4090 switch (rxdata) {
4091 case DP_TEST_LINK_TRAINING:
4092 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4093 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4094 response = intel_dp_autotest_link_training(intel_dp);
4095 break;
4096 case DP_TEST_LINK_VIDEO_PATTERN:
4097 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4098 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4099 response = intel_dp_autotest_video_pattern(intel_dp);
4100 break;
4101 case DP_TEST_LINK_EDID_READ:
4102 DRM_DEBUG_KMS("EDID test requested\n");
4103 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4104 response = intel_dp_autotest_edid(intel_dp);
4105 break;
4106 case DP_TEST_LINK_PHY_TEST_PATTERN:
4107 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4108 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4109 response = intel_dp_autotest_phy_pattern(intel_dp);
4110 break;
4111 default:
4112 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4113 break;
4114 }
4115
4116update_status:
4117 status = drm_dp_dpcd_write(&intel_dp->aux,
4118 DP_TEST_RESPONSE,
4119 &response, 1);
4120 if (status <= 0)
4121 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4122}
4123
0e32b39c
DA
4124static int
4125intel_dp_check_mst_status(struct intel_dp *intel_dp)
4126{
4127 bool bret;
4128
4129 if (intel_dp->is_mst) {
4130 u8 esi[16] = { 0 };
4131 int ret = 0;
4132 int retry;
4133 bool handled;
4134 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4135go_again:
4136 if (bret == true) {
4137
4138 /* check link status - esi[10] = 0x200c */
90a6b7b0 4139 if (intel_dp->active_mst_links &&
901c2daf 4140 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4141 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4142 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4143 intel_dp_stop_link_train(intel_dp);
4144 }
4145
6f34cc39 4146 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4147 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4148
4149 if (handled) {
4150 for (retry = 0; retry < 3; retry++) {
4151 int wret;
4152 wret = drm_dp_dpcd_write(&intel_dp->aux,
4153 DP_SINK_COUNT_ESI+1,
4154 &esi[1], 3);
4155 if (wret == 3) {
4156 break;
4157 }
4158 }
4159
4160 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4161 if (bret == true) {
6f34cc39 4162 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4163 goto go_again;
4164 }
4165 } else
4166 ret = 0;
4167
4168 return ret;
4169 } else {
4170 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4171 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4172 intel_dp->is_mst = false;
4173 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4174 /* send a hotplug event */
4175 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4176 }
4177 }
4178 return -EINVAL;
4179}
4180
a4fc5ed6
KP
4181/*
4182 * According to DP spec
4183 * 5.1.2:
4184 * 1. Read DPCD
4185 * 2. Configure link according to Receiver Capabilities
4186 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4187 * 4. Check link status on receipt of hot-plug interrupt
4188 */
a5146200 4189static void
ea5b213a 4190intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4191{
5b215bcf 4192 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4193 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4194 u8 sink_irq_vector;
93f62dad 4195 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4196
5b215bcf
DA
4197 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4198
e02f9a06 4199 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4200 return;
4201
1a125d8a
ID
4202 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4203 return;
4204
92fd8fd1 4205 /* Try to read receiver status if the link appears to be up */
93f62dad 4206 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4207 return;
4208 }
4209
92fd8fd1 4210 /* Now read the DPCD to see if it's actually running */
26d61aad 4211 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4212 return;
4213 }
4214
a60f0e38
JB
4215 /* Try to read the source of the interrupt */
4216 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4217 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4218 /* Clear interrupt source */
9d1a1031
JN
4219 drm_dp_dpcd_writeb(&intel_dp->aux,
4220 DP_DEVICE_SERVICE_IRQ_VECTOR,
4221 sink_irq_vector);
a60f0e38
JB
4222
4223 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4224 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4225 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4226 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4227 }
4228
901c2daf 4229 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4230 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4231 intel_encoder->base.name);
33a34e4e 4232 intel_dp_start_link_train(intel_dp);
3ab9c637 4233 intel_dp_stop_link_train(intel_dp);
33a34e4e 4234 }
a4fc5ed6 4235}
a4fc5ed6 4236
caf9ab24 4237/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4238static enum drm_connector_status
26d61aad 4239intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4240{
caf9ab24 4241 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4242 uint8_t type;
4243
4244 if (!intel_dp_get_dpcd(intel_dp))
4245 return connector_status_disconnected;
4246
4247 /* if there's no downstream port, we're done */
4248 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4249 return connector_status_connected;
caf9ab24
AJ
4250
4251 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4252 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4253 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4254 uint8_t reg;
9d1a1031
JN
4255
4256 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4257 &reg, 1) < 0)
caf9ab24 4258 return connector_status_unknown;
9d1a1031 4259
23235177
AJ
4260 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4261 : connector_status_disconnected;
caf9ab24
AJ
4262 }
4263
4264 /* If no HPD, poke DDC gently */
0b99836f 4265 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4266 return connector_status_connected;
caf9ab24
AJ
4267
4268 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4269 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4270 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4271 if (type == DP_DS_PORT_TYPE_VGA ||
4272 type == DP_DS_PORT_TYPE_NON_EDID)
4273 return connector_status_unknown;
4274 } else {
4275 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4276 DP_DWN_STRM_PORT_TYPE_MASK;
4277 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4278 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4279 return connector_status_unknown;
4280 }
caf9ab24
AJ
4281
4282 /* Anything else is out of spec, warn and ignore */
4283 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4284 return connector_status_disconnected;
71ba9000
AJ
4285}
4286
d410b56d
CW
4287static enum drm_connector_status
4288edp_detect(struct intel_dp *intel_dp)
4289{
4290 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4291 enum drm_connector_status status;
4292
4293 status = intel_panel_detect(dev);
4294 if (status == connector_status_unknown)
4295 status = connector_status_connected;
4296
4297 return status;
4298}
4299
b93433cc
JN
4300static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4301 struct intel_digital_port *port)
5eb08b69 4302{
b93433cc 4303 u32 bit;
01cb9ea6 4304
0df53b77
JN
4305 switch (port->port) {
4306 case PORT_A:
4307 return true;
4308 case PORT_B:
4309 bit = SDE_PORTB_HOTPLUG;
4310 break;
4311 case PORT_C:
4312 bit = SDE_PORTC_HOTPLUG;
4313 break;
4314 case PORT_D:
4315 bit = SDE_PORTD_HOTPLUG;
4316 break;
4317 default:
4318 MISSING_CASE(port->port);
4319 return false;
4320 }
4321
4322 return I915_READ(SDEISR) & bit;
4323}
4324
4325static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4326 struct intel_digital_port *port)
4327{
4328 u32 bit;
4329
4330 switch (port->port) {
4331 case PORT_A:
4332 return true;
4333 case PORT_B:
4334 bit = SDE_PORTB_HOTPLUG_CPT;
4335 break;
4336 case PORT_C:
4337 bit = SDE_PORTC_HOTPLUG_CPT;
4338 break;
4339 case PORT_D:
4340 bit = SDE_PORTD_HOTPLUG_CPT;
4341 break;
a78695d3
JN
4342 case PORT_E:
4343 bit = SDE_PORTE_HOTPLUG_SPT;
4344 break;
0df53b77
JN
4345 default:
4346 MISSING_CASE(port->port);
4347 return false;
b93433cc 4348 }
1b469639 4349
b93433cc 4350 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4351}
4352
7e66bcf2 4353static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4354 struct intel_digital_port *port)
a4fc5ed6 4355{
9642c81c 4356 u32 bit;
5eb08b69 4357
9642c81c
JN
4358 switch (port->port) {
4359 case PORT_B:
4360 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4361 break;
4362 case PORT_C:
4363 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4364 break;
4365 case PORT_D:
4366 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4367 break;
4368 default:
4369 MISSING_CASE(port->port);
4370 return false;
4371 }
4372
4373 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4374}
4375
4376static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4377 struct intel_digital_port *port)
4378{
4379 u32 bit;
4380
4381 switch (port->port) {
4382 case PORT_B:
4383 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4384 break;
4385 case PORT_C:
4386 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4387 break;
4388 case PORT_D:
4389 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4390 break;
4391 default:
4392 MISSING_CASE(port->port);
4393 return false;
a4fc5ed6
KP
4394 }
4395
1d245987 4396 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4397}
4398
e464bfde 4399static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4400 struct intel_digital_port *intel_dig_port)
e464bfde 4401{
e2ec35a5
SJ
4402 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4403 enum port port;
e464bfde
JN
4404 u32 bit;
4405
e2ec35a5
SJ
4406 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4407 switch (port) {
e464bfde
JN
4408 case PORT_A:
4409 bit = BXT_DE_PORT_HP_DDIA;
4410 break;
4411 case PORT_B:
4412 bit = BXT_DE_PORT_HP_DDIB;
4413 break;
4414 case PORT_C:
4415 bit = BXT_DE_PORT_HP_DDIC;
4416 break;
4417 default:
e2ec35a5 4418 MISSING_CASE(port);
e464bfde
JN
4419 return false;
4420 }
4421
4422 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4423}
4424
7e66bcf2
JN
4425/*
4426 * intel_digital_port_connected - is the specified port connected?
4427 * @dev_priv: i915 private structure
4428 * @port: the port to test
4429 *
4430 * Return %true if @port is connected, %false otherwise.
4431 */
237ed86c 4432bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4433 struct intel_digital_port *port)
4434{
0df53b77 4435 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4436 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4437 if (HAS_PCH_SPLIT(dev_priv))
4438 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4439 else if (IS_BROXTON(dev_priv))
4440 return bxt_digital_port_connected(dev_priv, port);
9642c81c
JN
4441 else if (IS_VALLEYVIEW(dev_priv))
4442 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4443 else
4444 return g4x_digital_port_connected(dev_priv, port);
4445}
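/*
 * Example: the detect paths below (ironlake_dp_detect(), g4x_dp_detect())
 * use this helper as a cheap live-status check before touching DPCD.
 */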
4446
b93433cc
JN
4447static enum drm_connector_status
4448ironlake_dp_detect(struct intel_dp *intel_dp)
4449{
4450 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4451 struct drm_i915_private *dev_priv = dev->dev_private;
4452 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4453
7e66bcf2 4454 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4455 return connector_status_disconnected;
4456
4457 return intel_dp_detect_dpcd(intel_dp);
4458}
4459
2a592bec
DA
4460static enum drm_connector_status
4461g4x_dp_detect(struct intel_dp *intel_dp)
4462{
4463 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4464 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec
DA
4465
4466 /* Can't disconnect eDP, but you can close the lid... */
4467 if (is_edp(intel_dp)) {
4468 enum drm_connector_status status;
4469
4470 status = intel_panel_detect(dev);
4471 if (status == connector_status_unknown)
4472 status = connector_status_connected;
4473 return status;
4474 }
4475
7e66bcf2 4476 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4477 return connector_status_disconnected;
4478
26d61aad 4479 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4480}
4481
8c241fef 4482static struct edid *
beb60608 4483intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4484{
beb60608 4485 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4486
9cd300e0
JN
4487 /* use cached edid if we have one */
4488 if (intel_connector->edid) {
9cd300e0
JN
4489 /* invalid edid */
4490 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4491 return NULL;
4492
55e9edeb 4493 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4494 } else
4495 return drm_get_edid(&intel_connector->base,
4496 &intel_dp->aux.ddc);
4497}
8c241fef 4498
beb60608
CW
4499static void
4500intel_dp_set_edid(struct intel_dp *intel_dp)
4501{
4502 struct intel_connector *intel_connector = intel_dp->attached_connector;
4503 struct edid *edid;
8c241fef 4504
beb60608
CW
4505 edid = intel_dp_get_edid(intel_dp);
4506 intel_connector->detect_edid = edid;
4507
4508 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4509 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4510 else
4511 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4512}
4513
beb60608
CW
4514static void
4515intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4516{
beb60608 4517 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4518
beb60608
CW
4519 kfree(intel_connector->detect_edid);
4520 intel_connector->detect_edid = NULL;
9cd300e0 4521
beb60608
CW
4522 intel_dp->has_audio = false;
4523}
d6f24d0f 4524
beb60608
CW
4525static enum intel_display_power_domain
4526intel_dp_power_get(struct intel_dp *dp)
4527{
4528 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4529 enum intel_display_power_domain power_domain;
4530
4531 power_domain = intel_display_port_power_domain(encoder);
4532 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4533
4534 return power_domain;
4535}
d6f24d0f 4536
beb60608
CW
4537static void
4538intel_dp_power_put(struct intel_dp *dp,
4539 enum intel_display_power_domain power_domain)
4540{
4541 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4542 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4543}
4544
a9756bb5
ZW
4545static enum drm_connector_status
4546intel_dp_detect(struct drm_connector *connector, bool force)
4547{
4548 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4549 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4550 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4551 struct drm_device *dev = connector->dev;
a9756bb5 4552 enum drm_connector_status status;
671dedd2 4553 enum intel_display_power_domain power_domain;
0e32b39c 4554 bool ret;
09b1eb13 4555 u8 sink_irq_vector;
a9756bb5 4556
164c8598 4557 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4558 connector->base.id, connector->name);
beb60608 4559 intel_dp_unset_edid(intel_dp);
164c8598 4560
0e32b39c
DA
4561 if (intel_dp->is_mst) {
4562 /* MST devices are disconnected from a monitor POV */
4563 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4564 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4565 return connector_status_disconnected;
0e32b39c
DA
4566 }
4567
beb60608 4568 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4569
d410b56d
CW
4570 /* Can't disconnect eDP, but you can close the lid... */
4571 if (is_edp(intel_dp))
4572 status = edp_detect(intel_dp);
4573 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4574 status = ironlake_dp_detect(intel_dp);
4575 else
4576 status = g4x_dp_detect(intel_dp);
4577 if (status != connector_status_connected)
c8c8fb33 4578 goto out;
a9756bb5 4579
0d198328
AJ
4580 intel_dp_probe_oui(intel_dp);
4581
0e32b39c
DA
4582 ret = intel_dp_probe_mst(intel_dp);
4583 if (ret) {
4584 /* If we are in MST mode then this connector won't appear
4585 * connected or have anything with EDID on it */
4586 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4587 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4588 status = connector_status_disconnected;
4589 goto out;
4590 }
4591
beb60608 4592 intel_dp_set_edid(intel_dp);
a9756bb5 4593
d63885da
PZ
4594 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4595 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4596 status = connector_status_connected;
4597
09b1eb13
TP
4598 /* Try to read the source of the interrupt */
4599 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4600 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4601 /* Clear interrupt source */
4602 drm_dp_dpcd_writeb(&intel_dp->aux,
4603 DP_DEVICE_SERVICE_IRQ_VECTOR,
4604 sink_irq_vector);
4605
4606 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4607 intel_dp_handle_test_request(intel_dp);
4608 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4609 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4610 }
4611
c8c8fb33 4612out:
beb60608 4613 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4614 return status;
a4fc5ed6
KP
4615}
4616
beb60608
CW
4617static void
4618intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4619{
df0e9248 4620 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4621 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4622 enum intel_display_power_domain power_domain;
a4fc5ed6 4623
beb60608
CW
4624 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4625 connector->base.id, connector->name);
4626 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4627
beb60608
CW
4628 if (connector->status != connector_status_connected)
4629 return;
671dedd2 4630
beb60608
CW
4631 power_domain = intel_dp_power_get(intel_dp);
4632
4633 intel_dp_set_edid(intel_dp);
4634
4635 intel_dp_power_put(intel_dp, power_domain);
4636
4637 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4638 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4639}
4640
4641static int intel_dp_get_modes(struct drm_connector *connector)
4642{
4643 struct intel_connector *intel_connector = to_intel_connector(connector);
4644 struct edid *edid;
4645
4646 edid = intel_connector->detect_edid;
4647 if (edid) {
4648 int ret = intel_connector_update_modes(connector, edid);
4649 if (ret)
4650 return ret;
4651 }
32f9d658 4652
f8779fda 4653 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4654 if (is_edp(intel_attached_dp(connector)) &&
4655 intel_connector->panel.fixed_mode) {
f8779fda 4656 struct drm_display_mode *mode;
beb60608
CW
4657
4658 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4659 intel_connector->panel.fixed_mode);
f8779fda 4660 if (mode) {
32f9d658
ZW
4661 drm_mode_probed_add(connector, mode);
4662 return 1;
4663 }
4664 }
beb60608 4665
32f9d658 4666 return 0;
a4fc5ed6
KP
4667}
4668
1aad7ac0
CW
4669static bool
4670intel_dp_detect_audio(struct drm_connector *connector)
4671{
1aad7ac0 4672 bool has_audio = false;
beb60608 4673 struct edid *edid;
1aad7ac0 4674
beb60608
CW
4675 edid = to_intel_connector(connector)->detect_edid;
4676 if (edid)
1aad7ac0 4677 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4678
1aad7ac0
CW
4679 return has_audio;
4680}
4681
f684960e
CW
4682static int
4683intel_dp_set_property(struct drm_connector *connector,
4684 struct drm_property *property,
4685 uint64_t val)
4686{
e953fd7b 4687 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4688 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4689 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4690 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4691 int ret;
4692
662595df 4693 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4694 if (ret)
4695 return ret;
4696
3f43c48d 4697 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4698 int i = val;
4699 bool has_audio;
4700
4701 if (i == intel_dp->force_audio)
f684960e
CW
4702 return 0;
4703
1aad7ac0 4704 intel_dp->force_audio = i;
f684960e 4705
c3e5f67b 4706 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4707 has_audio = intel_dp_detect_audio(connector);
4708 else
c3e5f67b 4709 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4710
4711 if (has_audio == intel_dp->has_audio)
f684960e
CW
4712 return 0;
4713
1aad7ac0 4714 intel_dp->has_audio = has_audio;
f684960e
CW
4715 goto done;
4716 }
4717
e953fd7b 4718 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4719 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4720 bool old_range = intel_dp->limited_color_range;
ae4edb80 4721
55bc60db
VS
4722 switch (val) {
4723 case INTEL_BROADCAST_RGB_AUTO:
4724 intel_dp->color_range_auto = true;
4725 break;
4726 case INTEL_BROADCAST_RGB_FULL:
4727 intel_dp->color_range_auto = false;
0f2a2a75 4728 intel_dp->limited_color_range = false;
55bc60db
VS
4729 break;
4730 case INTEL_BROADCAST_RGB_LIMITED:
4731 intel_dp->color_range_auto = false;
0f2a2a75 4732 intel_dp->limited_color_range = true;
55bc60db
VS
4733 break;
4734 default:
4735 return -EINVAL;
4736 }
ae4edb80
DV
4737
4738 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4739 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4740 return 0;
4741
e953fd7b
CW
4742 goto done;
4743 }
4744
53b41837
YN
4745 if (is_edp(intel_dp) &&
4746 property == connector->dev->mode_config.scaling_mode_property) {
4747 if (val == DRM_MODE_SCALE_NONE) {
4748 DRM_DEBUG_KMS("no scaling not supported\n");
4749 return -EINVAL;
4750 }
4751
4752 if (intel_connector->panel.fitting_mode == val) {
4753 /* the eDP scaling property is not changed */
4754 return 0;
4755 }
4756 intel_connector->panel.fitting_mode = val;
4757
4758 goto done;
4759 }
4760
f684960e
CW
4761 return -EINVAL;
4762
4763done:
c0c36b94
CW
4764 if (intel_encoder->base.crtc)
4765 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4766
4767 return 0;
4768}
4769
a4fc5ed6 4770static void
73845adf 4771intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4772{
1d508706 4773 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4774
10e972d3 4775 kfree(intel_connector->detect_edid);
beb60608 4776
9cd300e0
JN
4777 if (!IS_ERR_OR_NULL(intel_connector->edid))
4778 kfree(intel_connector->edid);
4779
acd8db10
PZ
4780 /* Can't call is_edp() since the encoder may have been destroyed
4781 * already. */
4782 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4783 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4784
a4fc5ed6 4785 drm_connector_cleanup(connector);
55f78c43 4786 kfree(connector);
a4fc5ed6
KP
4787}
4788
00c09d70 4789void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4790{
da63a9f2
PZ
4791 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4792 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4793
4f71d0cb 4794 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4795 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4796 if (is_edp(intel_dp)) {
4797 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4798 /*
4799 * vdd might still be enabled due to the delayed vdd off.
4800 * Make sure vdd is actually turned off here.
4801 */
773538e8 4802 pps_lock(intel_dp);
4be73780 4803 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4804 pps_unlock(intel_dp);
4805
01527b31
CT
4806 if (intel_dp->edp_notifier.notifier_call) {
4807 unregister_reboot_notifier(&intel_dp->edp_notifier);
4808 intel_dp->edp_notifier.notifier_call = NULL;
4809 }
bd943159 4810 }
c8bd0e49 4811 drm_encoder_cleanup(encoder);
da63a9f2 4812 kfree(intel_dig_port);
24d05927
DV
4813}
4814
07f9cd0b
ID
4815static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4816{
4817 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4818
4819 if (!is_edp(intel_dp))
4820 return;
4821
951468f3
VS
4822 /*
4823 * vdd might still be enabled due to the delayed vdd off.
4824 * Make sure vdd is actually turned off here.
4825 */
afa4e53a 4826 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4827 pps_lock(intel_dp);
07f9cd0b 4828 edp_panel_vdd_off_sync(intel_dp);
773538e8 4829 pps_unlock(intel_dp);
07f9cd0b
ID
4830}
4831
49e6bc51
VS
4832static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4833{
4834 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4835 struct drm_device *dev = intel_dig_port->base.base.dev;
4836 struct drm_i915_private *dev_priv = dev->dev_private;
4837 enum intel_display_power_domain power_domain;
4838
4839 lockdep_assert_held(&dev_priv->pps_mutex);
4840
4841 if (!edp_have_panel_vdd(intel_dp))
4842 return;
4843
4844 /*
4845 * The VDD bit needs a power domain reference, so if the bit is
4846 * already enabled when we boot or resume, grab this reference and
4847 * schedule a vdd off, so we don't hold on to the reference
4848 * indefinitely.
4849 */
4850 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4851 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4852 intel_display_power_get(dev_priv, power_domain);
4853
4854 edp_panel_vdd_schedule_off(intel_dp);
4855}
4856
6d93c0c4
ID
4857static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4858{
49e6bc51
VS
4859 struct intel_dp *intel_dp;
4860
4861 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4862 return;
4863
4864 intel_dp = enc_to_intel_dp(encoder);
4865
4866 pps_lock(intel_dp);
4867
4868 /*
4869 * Read out the current power sequencer assignment,
4870 * in case the BIOS did something with it.
4871 */
4872 if (IS_VALLEYVIEW(encoder->dev))
4873 vlv_initial_power_sequencer_setup(intel_dp);
4874
4875 intel_edp_panel_vdd_sanitize(intel_dp);
4876
4877 pps_unlock(intel_dp);
6d93c0c4
ID
4878}
4879
a4fc5ed6 4880static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4881 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4882 .detect = intel_dp_detect,
beb60608 4883 .force = intel_dp_force,
a4fc5ed6 4884 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4885 .set_property = intel_dp_set_property,
2545e4a6 4886 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4887 .destroy = intel_dp_connector_destroy,
c6f95f27 4888 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4889 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4890};
4891
4892static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4893 .get_modes = intel_dp_get_modes,
4894 .mode_valid = intel_dp_mode_valid,
df0e9248 4895 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4896};
4897
a4fc5ed6 4898static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4899 .reset = intel_dp_encoder_reset,
24d05927 4900 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4901};
4902
b2c5c181 4903enum irqreturn
13cf5504
DA
4904intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4905{
4906 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4907 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4908 struct drm_device *dev = intel_dig_port->base.base.dev;
4909 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4910 enum intel_display_power_domain power_domain;
b2c5c181 4911 enum irqreturn ret = IRQ_NONE;
1c767b33 4912
0e32b39c
DA
4913 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4914 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4915
7a7f84cc
VS
4916 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4917 /*
4918 * vdd off can generate a long pulse on eDP which
4919 * would require vdd on to handle it, and thus we
4920 * would end up in an endless cycle of
4921 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4922 */
4923 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4924 port_name(intel_dig_port->port));
a8b3d52f 4925 return IRQ_HANDLED;
7a7f84cc
VS
4926 }
4927
26fbb774
VS
4928 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4929 port_name(intel_dig_port->port),
0e32b39c 4930 long_hpd ? "long" : "short");
13cf5504 4931
1c767b33
ID
4932 power_domain = intel_display_port_power_domain(intel_encoder);
4933 intel_display_power_get(dev_priv, power_domain);
4934
0e32b39c 4935 if (long_hpd) {
5fa836a9
MK
4936 /* indicate that we need to restart link training */
4937 intel_dp->train_set_valid = false;
2a592bec 4938
7e66bcf2
JN
4939 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4940 goto mst_fail;
0e32b39c
DA
4941
4942 if (!intel_dp_get_dpcd(intel_dp)) {
4943 goto mst_fail;
4944 }
4945
4946 intel_dp_probe_oui(intel_dp);
4947
d14e7b6d
VS
4948 if (!intel_dp_probe_mst(intel_dp)) {
4949 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4950 intel_dp_check_link_status(intel_dp);
4951 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 4952 goto mst_fail;
d14e7b6d 4953 }
0e32b39c
DA
4954 } else {
4955 if (intel_dp->is_mst) {
1c767b33 4956 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4957 goto mst_fail;
4958 }
4959
4960 if (!intel_dp->is_mst) {
5b215bcf 4961 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4962 intel_dp_check_link_status(intel_dp);
5b215bcf 4963 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4964 }
4965 }
b2c5c181
DV
4966
4967 ret = IRQ_HANDLED;
4968
1c767b33 4969 goto put_power;
0e32b39c
DA
4970mst_fail:
4971 /* if we were in MST mode, and the device is not there, get out of MST mode */
4972 if (intel_dp->is_mst) {
4973 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4974 intel_dp->is_mst = false;
4975 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4976 }
1c767b33
ID
4977put_power:
4978 intel_display_power_put(dev_priv, power_domain);
4979
4980 return ret;
13cf5504
DA
4981}
4982
e3421a18
ZW
4983/* Return which DP Port should be selected for Transcoder DP control */
4984int
0206e353 4985intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4986{
4987 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4988 struct intel_encoder *intel_encoder;
4989 struct intel_dp *intel_dp;
e3421a18 4990
fa90ecef
PZ
4991 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4992 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4993
fa90ecef
PZ
4994 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4995 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4996 return intel_dp->output_reg;
e3421a18 4997 }
ea5b213a 4998
e3421a18
ZW
4999 return -1;
5000}
5001
477ec328 5002/* check the VBT to see whether the eDP is on another port */
5d8a7752 5003bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5004{
5005 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5006 union child_device_config *p_child;
36e83a18 5007 int i;
5d8a7752 5008 static const short port_mapping[] = {
477ec328
RV
5009 [PORT_B] = DVO_PORT_DPB,
5010 [PORT_C] = DVO_PORT_DPC,
5011 [PORT_D] = DVO_PORT_DPD,
5012 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5013 };
36e83a18 5014
53ce81a7
VS
5015 /*
5016 * eDP is not supported on g4x, so bail out early just
5017 * for a bit of extra safety in case the VBT is bonkers.
5018 */
5019 if (INTEL_INFO(dev)->gen < 5)
5020 return false;
5021
3b32a35b
VS
5022 if (port == PORT_A)
5023 return true;
5024
41aa3448 5025 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5026 return false;
5027
41aa3448
RV
5028 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5029 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5030
5d8a7752 5031 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5032 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5033 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5034 return true;
5035 }
5036 return false;
5037}
5038
0e32b39c 5039void
f684960e
CW
5040intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5041{
53b41837
YN
5042 struct intel_connector *intel_connector = to_intel_connector(connector);
5043
3f43c48d 5044 intel_attach_force_audio_property(connector);
e953fd7b 5045 intel_attach_broadcast_rgb_property(connector);
55bc60db 5046 intel_dp->color_range_auto = true;
53b41837
YN
5047
5048 if (is_edp(intel_dp)) {
5049 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5050 drm_object_attach_property(
5051 &connector->base,
53b41837 5052 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5053 DRM_MODE_SCALE_ASPECT);
5054 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5055 }
f684960e
CW
5056}
5057
dada1a9f
ID
5058static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5059{
5060 intel_dp->last_power_cycle = jiffies;
5061 intel_dp->last_power_on = jiffies;
5062 intel_dp->last_backlight_off = jiffies;
5063}
5064
67a54566
DV
5065static void
5066intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5067 struct intel_dp *intel_dp)
67a54566
DV
5068{
5069 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5070 struct edp_power_seq cur, vbt, spec,
5071 *final = &intel_dp->pps_delays;
b0a08bec
VK
5072 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5073 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5074
e39b999a
VS
5075 lockdep_assert_held(&dev_priv->pps_mutex);
5076
81ddbc69
VS
5077 /* already initialized? */
5078 if (final->t11_t12 != 0)
5079 return;
5080
b0a08bec
VK
5081 if (IS_BROXTON(dev)) {
5082 /*
5083 * TODO: BXT has 2 sets of PPS registers.
5084 * Correct Register for Broxton need to be identified
5085 * using VBT. hardcoding for now
5086 */
5087 pp_ctrl_reg = BXT_PP_CONTROL(0);
5088 pp_on_reg = BXT_PP_ON_DELAYS(0);
5089 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5090 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5091 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5092 pp_on_reg = PCH_PP_ON_DELAYS;
5093 pp_off_reg = PCH_PP_OFF_DELAYS;
5094 pp_div_reg = PCH_PP_DIVISOR;
5095 } else {
bf13e81b
JN
5096 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5097
5098 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5099 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5100 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5101 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5102 }
67a54566
DV
5103
5104 /* Workaround: Need to write PP_CONTROL with the unlock key as
5105 * the very first thing. */
b0a08bec 5106 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5107
453c5420
JB
5108 pp_on = I915_READ(pp_on_reg);
5109 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5110 if (!IS_BROXTON(dev)) {
5111 I915_WRITE(pp_ctrl_reg, pp_ctl);
5112 pp_div = I915_READ(pp_div_reg);
5113 }
67a54566
DV
5114
5115 /* Pull timing values out of registers */
5116 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5117 PANEL_POWER_UP_DELAY_SHIFT;
5118
5119 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5120 PANEL_LIGHT_ON_DELAY_SHIFT;
5121
5122 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5123 PANEL_LIGHT_OFF_DELAY_SHIFT;
5124
5125 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5126 PANEL_POWER_DOWN_DELAY_SHIFT;
5127
b0a08bec
VK
5128 if (IS_BROXTON(dev)) {
5129 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5130 BXT_POWER_CYCLE_DELAY_SHIFT;
5131 if (tmp > 0)
5132 cur.t11_t12 = (tmp - 1) * 1000;
5133 else
5134 cur.t11_t12 = 0;
5135 } else {
5136 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5137 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5138 }
67a54566
DV
5139
5140 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5141 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5142
41aa3448 5143 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5144
5145 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5146 * our hw here, which are all in 100usec. */
5147 spec.t1_t3 = 210 * 10;
5148 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5149 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5150 spec.t10 = 500 * 10;
5151 /* This one is special and actually in units of 100ms, but zero
5152 * based in the hw (so we need to add 100 ms). But the sw vbt
5153 * table multiplies it by 1000 to make it in units of 100usec,
5154 * too. */
5155 spec.t11_t12 = (510 + 100) * 10;
5156
5157 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5158 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5159
5160 /* Use the max of the register settings and vbt. If both are
5161 * unset, fall back to the spec limits. */
36b5f425 5162#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5163 spec.field : \
5164 max(cur.field, vbt.field))
5165 assign_final(t1_t3);
5166 assign_final(t8);
5167 assign_final(t9);
5168 assign_final(t10);
5169 assign_final(t11_t12);
5170#undef assign_final
5171
36b5f425 5172#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5173 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5174 intel_dp->backlight_on_delay = get_delay(t8);
5175 intel_dp->backlight_off_delay = get_delay(t9);
5176 intel_dp->panel_power_down_delay = get_delay(t10);
5177 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5178#undef get_delay
5179
f30d26e4
JN
5180 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5181 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5182 intel_dp->panel_power_cycle_delay);
5183
5184 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5185 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5186}
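
/*
 * Illustrative sketch (not part of the driver): the delays above are kept
 * in the hardware's 100 usec units and only converted to milliseconds at
 * the very end via DIV_ROUND_UP(field, 10). The standalone user-space
 * snippet below walks through that conversion with made-up sample values;
 * the SKETCH_* names and the numbers are hypothetical, used only here.
 */
#include <stdio.h>

#define SKETCH_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cur_t1_t3 = 0;		/* PPS register never programmed */
	unsigned int vbt_t1_t3 = 1300;		/* 130 ms, in 100 usec units */
	unsigned int spec_t1_t3 = 210 * 10;	/* eDP 1.3 upper limit, same units */

	/* same policy as assign_final(): max(cur, vbt), else the spec limit */
	unsigned int final_t1_t3 = cur_t1_t3 > vbt_t1_t3 ? cur_t1_t3 : vbt_t1_t3;

	if (final_t1_t3 == 0)
		final_t1_t3 = spec_t1_t3;

	/* same conversion as get_delay(): 100 usec units -> milliseconds */
	printf("panel_power_up_delay = %u ms\n",
	       SKETCH_DIV_ROUND_UP(final_t1_t3, 10));	/* prints 130 */

	return 0;
}
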
5187
5188static void
5189intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5190 struct intel_dp *intel_dp)
f30d26e4
JN
5191{
5192 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5193 u32 pp_on, pp_off, pp_div, port_sel = 0;
5194 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5195 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5196 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5197 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5198
e39b999a 5199 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5200
b0a08bec
VK
5201 if (IS_BROXTON(dev)) {
5202 /*
5203 * TODO: BXT has 2 sets of PPS registers.
5204 * The correct register for Broxton needs to be identified
5205 * using the VBT. Hardcoded for now.
5206 */
5207 pp_ctrl_reg = BXT_PP_CONTROL(0);
5208 pp_on_reg = BXT_PP_ON_DELAYS(0);
5209 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5210
5211 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5212 pp_on_reg = PCH_PP_ON_DELAYS;
5213 pp_off_reg = PCH_PP_OFF_DELAYS;
5214 pp_div_reg = PCH_PP_DIVISOR;
5215 } else {
bf13e81b
JN
5216 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5217
5218 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5219 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5220 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5221 }
5222
b2f19d1a
PZ
5223 /*
5224 * And finally store the new values in the power sequencer. The
5225 * backlight delays are set to 1 because we do manual waits on them. For
5226 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5227 * we'll end up waiting for the backlight off delay twice: once when we
5228 * do the manual sleep, and once when we disable the panel and wait for
5229 * the PP_STATUS bit to become zero.
5230 */
f30d26e4 5231 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5232 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5233 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5234 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5235 /* Compute the divisor for the pp clock, simply match the Bspec
5236 * formula. */
b0a08bec
VK
5237 if (IS_BROXTON(dev)) {
5238 pp_div = I915_READ(pp_ctrl_reg);
5239 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5240 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5241 << BXT_POWER_CYCLE_DELAY_SHIFT);
5242 } else {
5243 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5244 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5245 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5246 }
67a54566
DV
5247
5248 /* Haswell doesn't have any port selection bits for the panel
5249 * power sequencer any more. */
bc7d38a4 5250 if (IS_VALLEYVIEW(dev)) {
ad933b56 5251 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5252 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5253 if (port == PORT_A)
a24c144c 5254 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5255 else
a24c144c 5256 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5257 }
5258
453c5420
JB
5259 pp_on |= port_sel;
5260
5261 I915_WRITE(pp_on_reg, pp_on);
5262 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5263 if (IS_BROXTON(dev))
5264 I915_WRITE(pp_ctrl_reg, pp_div);
5265 else
5266 I915_WRITE(pp_div_reg, pp_div);
67a54566 5267
67a54566 5268 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5269 I915_READ(pp_on_reg),
5270 I915_READ(pp_off_reg),
b0a08bec
VK
5271 IS_BROXTON(dev) ?
5272 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5273 I915_READ(pp_div_reg));
f684960e
CW
5274}
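
/*
 * Illustrative sketch (not part of the driver): a standalone walk-through
 * of the two non-obvious values written above, using made-up sample
 * numbers. The power-cycle field (T11/T12) is stored in 100 ms units and
 * rounded up from the 100 usec value kept in pps_delays, and the reference
 * divider follows the ((100 * div) / 2 - 1) Bspec formula. The "div" value
 * here is only an example input standing in for what
 * intel_pch_rawclk()/intel_hrawclk() return in the real code.
 */
#include <stdio.h>

#define SKETCH_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int t11_t12 = 6100;	/* 610 ms, in 100 usec units */
	unsigned int div = 125;		/* hypothetical raw clock value */

	/* 100 usec units -> 100 ms units, rounding up: 6100 -> 7 (700 ms) */
	unsigned int power_cycle_field = SKETCH_DIV_ROUND_UP(t11_t12, 1000);

	/* reference divider, matching the Bspec formula used above */
	unsigned int ref_divider = (100 * div) / 2 - 1;

	printf("power cycle field = %u, reference divider = %u\n",
	       power_cycle_field, ref_divider);	/* 7 and 6249 */

	return 0;
}
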
5275
b33a2815
VK
5276/**
5277 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5278 * @dev: DRM device
5279 * @refresh_rate: RR to be programmed
5280 *
5281 * This function gets called when refresh rate (RR) has to be changed from
5282 * one frequency to another. Switches can be between high and low RR
5283 * supported by the panel or to any other RR based on media playback (in
5284 * this case, RR value needs to be passed from user space).
5285 *
5286 * The caller of this function needs to take a lock on dev_priv->drrs.
5287 */
96178eeb 5288static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5289{
5290 struct drm_i915_private *dev_priv = dev->dev_private;
5291 struct intel_encoder *encoder;
96178eeb
VK
5292 struct intel_digital_port *dig_port = NULL;
5293 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5294 struct intel_crtc_state *config = NULL;
439d7ac0 5295 struct intel_crtc *intel_crtc = NULL;
96178eeb 5296 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5297
5298 if (refresh_rate <= 0) {
5299 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5300 return;
5301 }
5302
96178eeb
VK
5303 if (intel_dp == NULL) {
5304 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5305 return;
5306 }
5307
1fcc9d1c 5308 /*
e4d59f6b
RV
5309 * FIXME: This needs proper synchronization with psr state for some
5310 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5311 */
439d7ac0 5312
96178eeb
VK
5313 dig_port = dp_to_dig_port(intel_dp);
5314 encoder = &dig_port->base;
723f9aab 5315 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5316
5317 if (!intel_crtc) {
5318 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5319 return;
5320 }
5321
6e3c9717 5322 config = intel_crtc->config;
439d7ac0 5323
96178eeb 5324 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5325 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5326 return;
5327 }
5328
96178eeb
VK
5329 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5330 refresh_rate)
439d7ac0
PB
5331 index = DRRS_LOW_RR;
5332
96178eeb 5333 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5334 DRM_DEBUG_KMS(
5335 "DRRS requested for previously set RR...ignoring\n");
5336 return;
5337 }
5338
5339 if (!intel_crtc->active) {
5340 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5341 return;
5342 }
5343
44395bfe 5344 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5345 switch (index) {
5346 case DRRS_HIGH_RR:
5347 intel_dp_set_m_n(intel_crtc, M1_N1);
5348 break;
5349 case DRRS_LOW_RR:
5350 intel_dp_set_m_n(intel_crtc, M2_N2);
5351 break;
5352 case DRRS_MAX_RR:
5353 default:
5354 DRM_ERROR("Unsupported refresh rate type\n");
5355 }
5356 } else if (INTEL_INFO(dev)->gen > 6) {
649636ef
VS
5357 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5358 u32 val;
a4c30b1d 5359
649636ef 5360 val = I915_READ(reg);
439d7ac0 5361 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5362 if (IS_VALLEYVIEW(dev))
5363 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5364 else
5365 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5366 } else {
6fa7aec1
VK
5367 if (IS_VALLEYVIEW(dev))
5368 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5369 else
5370 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5371 }
5372 I915_WRITE(reg, val);
5373 }
5374
4e9ac947
VK
5375 dev_priv->drrs.refresh_rate_type = index;
5376
5377 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5378}
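
/*
 * Minimal usage sketch, mirroring the callers further down in this file:
 * the RR switch must be done under dev_priv->drrs.mutex, and the target
 * refresh rate is taken from the panel's fixed or downclock mode. This is
 * only an illustration of the calling convention, not a new code path.
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	if (dev_priv->drrs.dp)
 *		intel_dp_set_drrs_state(dev_priv->dev,
 *					dev_priv->drrs.dp->attached_connector->
 *					panel.downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */
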
5379
b33a2815
VK
5380/**
5381 * intel_edp_drrs_enable - init drrs struct if supported
5382 * @intel_dp: DP struct
5383 *
5384 * Initializes frontbuffer_bits and drrs.dp
5385 */
c395578e
VK
5386void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5387{
5388 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5389 struct drm_i915_private *dev_priv = dev->dev_private;
5390 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5391 struct drm_crtc *crtc = dig_port->base.base.crtc;
5392 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5393
5394 if (!intel_crtc->config->has_drrs) {
5395 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5396 return;
5397 }
5398
5399 mutex_lock(&dev_priv->drrs.mutex);
5400 if (WARN_ON(dev_priv->drrs.dp)) {
5401 DRM_ERROR("DRRS already enabled\n");
5402 goto unlock;
5403 }
5404
5405 dev_priv->drrs.busy_frontbuffer_bits = 0;
5406
5407 dev_priv->drrs.dp = intel_dp;
5408
5409unlock:
5410 mutex_unlock(&dev_priv->drrs.mutex);
5411}
5412
b33a2815
VK
5413/**
5414 * intel_edp_drrs_disable - Disable DRRS
5415 * @intel_dp: DP struct
5416 *
5417 */
c395578e
VK
5418void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5419{
5420 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5421 struct drm_i915_private *dev_priv = dev->dev_private;
5422 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5423 struct drm_crtc *crtc = dig_port->base.base.crtc;
5424 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5425
5426 if (!intel_crtc->config->has_drrs)
5427 return;
5428
5429 mutex_lock(&dev_priv->drrs.mutex);
5430 if (!dev_priv->drrs.dp) {
5431 mutex_unlock(&dev_priv->drrs.mutex);
5432 return;
5433 }
5434
5435 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5436 intel_dp_set_drrs_state(dev_priv->dev,
5437 intel_dp->attached_connector->panel.
5438 fixed_mode->vrefresh);
5439
5440 dev_priv->drrs.dp = NULL;
5441 mutex_unlock(&dev_priv->drrs.mutex);
5442
5443 cancel_delayed_work_sync(&dev_priv->drrs.work);
5444}
5445
4e9ac947
VK
5446static void intel_edp_drrs_downclock_work(struct work_struct *work)
5447{
5448 struct drm_i915_private *dev_priv =
5449 container_of(work, typeof(*dev_priv), drrs.work.work);
5450 struct intel_dp *intel_dp;
5451
5452 mutex_lock(&dev_priv->drrs.mutex);
5453
5454 intel_dp = dev_priv->drrs.dp;
5455
5456 if (!intel_dp)
5457 goto unlock;
5458
439d7ac0 5459 /*
4e9ac947
VK
5460 * The delayed work can race with an invalidate hence we need to
5461 * recheck.
439d7ac0
PB
5462 */
5463
4e9ac947
VK
5464 if (dev_priv->drrs.busy_frontbuffer_bits)
5465 goto unlock;
439d7ac0 5466
4e9ac947
VK
5467 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5468 intel_dp_set_drrs_state(dev_priv->dev,
5469 intel_dp->attached_connector->panel.
5470 downclock_mode->vrefresh);
439d7ac0 5471
4e9ac947 5472unlock:
4e9ac947 5473 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5474}
5475
b33a2815 5476/**
0ddfd203 5477 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5478 * @dev: DRM device
5479 * @frontbuffer_bits: frontbuffer plane tracking bits
5480 *
0ddfd203
R
5481 * This function gets called every time rendering on the given planes starts.
5482 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5483 *
5484 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5485 */
a93fad0f
VK
5486void intel_edp_drrs_invalidate(struct drm_device *dev,
5487 unsigned frontbuffer_bits)
5488{
5489 struct drm_i915_private *dev_priv = dev->dev_private;
5490 struct drm_crtc *crtc;
5491 enum pipe pipe;
5492
9da7d693 5493 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5494 return;
5495
88f933a8 5496 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5497
a93fad0f 5498 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5499 if (!dev_priv->drrs.dp) {
5500 mutex_unlock(&dev_priv->drrs.mutex);
5501 return;
5502 }
5503
a93fad0f
VK
5504 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5505 pipe = to_intel_crtc(crtc)->pipe;
5506
c1d038c6
DV
5507 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5508 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5509
0ddfd203 5510 /* invalidate means busy screen hence upclock */
c1d038c6 5511 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5512 intel_dp_set_drrs_state(dev_priv->dev,
5513 dev_priv->drrs.dp->attached_connector->panel.
5514 fixed_mode->vrefresh);
a93fad0f 5515
a93fad0f
VK
5516 mutex_unlock(&dev_priv->drrs.mutex);
5517}
5518
b33a2815 5519/**
0ddfd203 5520 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5521 * @dev: DRM device
5522 * @frontbuffer_bits: frontbuffer plane tracking bits
5523 *
0ddfd203
R
5524 * This function gets called every time rendering on the given planes has
5525 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5526 * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
5527 * if no other planes are dirty.
b33a2815
VK
5528 *
5529 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5530 */
a93fad0f
VK
5531void intel_edp_drrs_flush(struct drm_device *dev,
5532 unsigned frontbuffer_bits)
5533{
5534 struct drm_i915_private *dev_priv = dev->dev_private;
5535 struct drm_crtc *crtc;
5536 enum pipe pipe;
5537
9da7d693 5538 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5539 return;
5540
88f933a8 5541 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5542
a93fad0f 5543 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5544 if (!dev_priv->drrs.dp) {
5545 mutex_unlock(&dev_priv->drrs.mutex);
5546 return;
5547 }
5548
a93fad0f
VK
5549 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5550 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5551
5552 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5553 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5554
0ddfd203 5555 /* flush means busy screen hence upclock */
c1d038c6 5556 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5557 intel_dp_set_drrs_state(dev_priv->dev,
5558 dev_priv->drrs.dp->attached_connector->panel.
5559 fixed_mode->vrefresh);
5560
5561 /*
5562 * flush also means no more activity hence schedule downclock, if all
5563 * other fbs are quiescent too
5564 */
5565 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5566 schedule_delayed_work(&dev_priv->drrs.work,
5567 msecs_to_jiffies(1000));
5568 mutex_unlock(&dev_priv->drrs.mutex);
5569}
5570
b33a2815
VK
5571/**
5572 * DOC: Display Refresh Rate Switching (DRRS)
5573 *
5574 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5575 * which enables switching between low and high refresh rates,
5576 * dynamically, based on the usage scenario. This feature is applicable
5577 * for internal panels.
5578 *
5579 * Indication that the panel supports DRRS is given by the panel EDID, which
5580 * would list multiple refresh rates for one resolution.
5581 *
5582 * DRRS is of 2 types - static and seamless.
5583 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5584 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5585 * Seamless DRRS involves changing RR without any visual effect to the user
5586 * and can be used during normal system usage. This is done by programming
5587 * certain registers.
5588 *
5589 * Support for static/seamless DRRS may be indicated in the VBT based on
5590 * inputs from the panel spec.
5591 *
5592 * DRRS saves power by switching to low RR based on usage scenarios.
5593 *
5594 * eDP DRRS:-
5595 * The implementation is based on frontbuffer tracking implementation.
5596 * When there is a disturbance on the screen triggered by user activity or a
5597 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5598 * When there is no movement on screen, after a timeout of 1 second, a switch
5599 * to low RR is made.
5600 * For integration with frontbuffer tracking code,
5601 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5602 *
5603 * DRRS can be further extended to support other internal panels and also
5604 * the scenario of video playback wherein RR is set based on the rate
5605 * requested by userspace.
5606 */
5607
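/*
 * Illustrative sketch of the frontbuffer-tracking hand-off described in
 * the DOC comment above: rendering activity invalidates (forces high RR),
 * and a completion/flip flushes (re-arming the 1 second idleness
 * downclock). The frontbuffer_bits value is a hypothetical plane mask
 * supplied by the frontbuffer tracking code, shown only to make the
 * calling order clear.
 *
 *	rendering starts on some planes:
 *		intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *
 *	rendering completes or the crtc flips:
 *		intel_edp_drrs_flush(dev, frontbuffer_bits);
 */
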
5608/**
5609 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5610 * @intel_connector: eDP connector
5611 * @fixed_mode: preferred mode of panel
5612 *
5613 * This function is called only once at driver load to initialize basic
5614 * DRRS stuff.
5615 *
5616 * Returns:
5617 * Downclock mode if panel supports it, else return NULL.
5618 * DRRS support is determined by the presence of downclock mode (apart
5619 * from VBT setting).
5620 */
4f9db5b5 5621static struct drm_display_mode *
96178eeb
VK
5622intel_dp_drrs_init(struct intel_connector *intel_connector,
5623 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5624{
5625 struct drm_connector *connector = &intel_connector->base;
96178eeb 5626 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5627 struct drm_i915_private *dev_priv = dev->dev_private;
5628 struct drm_display_mode *downclock_mode = NULL;
5629
9da7d693
DV
5630 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5631 mutex_init(&dev_priv->drrs.mutex);
5632
4f9db5b5
PB
5633 if (INTEL_INFO(dev)->gen <= 6) {
5634 DRM_DEBUG_KMS("DRRS supported only for Gen7 and above\n");
5635 return NULL;
5636 }
5637
5638 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5639 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5640 return NULL;
5641 }
5642
5643 downclock_mode = intel_find_panel_downclock
5644 (dev, fixed_mode, connector);
5645
5646 if (!downclock_mode) {
a1d26342 5647 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5648 return NULL;
5649 }
5650
96178eeb 5651 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5652
96178eeb 5653 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5654 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5655 return downclock_mode;
5656}
5657
ed92f0b2 5658static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5659 struct intel_connector *intel_connector)
ed92f0b2
PZ
5660{
5661 struct drm_connector *connector = &intel_connector->base;
5662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5663 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5664 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5665 struct drm_i915_private *dev_priv = dev->dev_private;
5666 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5667 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5668 bool has_dpcd;
5669 struct drm_display_mode *scan;
5670 struct edid *edid;
6517d273 5671 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5672
5673 if (!is_edp(intel_dp))
5674 return true;
5675
49e6bc51
VS
5676 pps_lock(intel_dp);
5677 intel_edp_panel_vdd_sanitize(intel_dp);
5678 pps_unlock(intel_dp);
63635217 5679
ed92f0b2 5680 /* Cache DPCD and EDID for edp. */
ed92f0b2 5681 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5682
5683 if (has_dpcd) {
5684 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5685 dev_priv->no_aux_handshake =
5686 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5687 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5688 } else {
5689 /* if this fails, presume the device is a ghost */
5690 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5691 return false;
5692 }
5693
5694 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5695 pps_lock(intel_dp);
36b5f425 5696 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5697 pps_unlock(intel_dp);
ed92f0b2 5698
060c8778 5699 mutex_lock(&dev->mode_config.mutex);
0b99836f 5700 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5701 if (edid) {
5702 if (drm_add_edid_modes(connector, edid)) {
5703 drm_mode_connector_update_edid_property(connector,
5704 edid);
5705 drm_edid_to_eld(connector, edid);
5706 } else {
5707 kfree(edid);
5708 edid = ERR_PTR(-EINVAL);
5709 }
5710 } else {
5711 edid = ERR_PTR(-ENOENT);
5712 }
5713 intel_connector->edid = edid;
5714
5715 /* prefer fixed mode from EDID if available */
5716 list_for_each_entry(scan, &connector->probed_modes, head) {
5717 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5718 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5719 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5720 intel_connector, fixed_mode);
ed92f0b2
PZ
5721 break;
5722 }
5723 }
5724
5725 /* fallback to VBT if available for eDP */
5726 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5727 fixed_mode = drm_mode_duplicate(dev,
5728 dev_priv->vbt.lfp_lvds_vbt_mode);
5729 if (fixed_mode)
5730 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5731 }
060c8778 5732 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5733
01527b31
CT
5734 if (IS_VALLEYVIEW(dev)) {
5735 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5736 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5737
5738 /*
5739 * Figure out the current pipe for the initial backlight setup.
5740 * If the current pipe isn't valid, try the PPS pipe, and if that
5741 * fails just assume pipe A.
5742 */
5743 if (IS_CHERRYVIEW(dev))
5744 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5745 else
5746 pipe = PORT_TO_PIPE(intel_dp->DP);
5747
5748 if (pipe != PIPE_A && pipe != PIPE_B)
5749 pipe = intel_dp->pps_pipe;
5750
5751 if (pipe != PIPE_A && pipe != PIPE_B)
5752 pipe = PIPE_A;
5753
5754 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5755 pipe_name(pipe));
01527b31
CT
5756 }
5757
4f9db5b5 5758 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5759 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5760 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5761
5762 return true;
5763}
5764
16c25533 5765bool
f0fec3f2
PZ
5766intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5767 struct intel_connector *intel_connector)
a4fc5ed6 5768{
f0fec3f2
PZ
5769 struct drm_connector *connector = &intel_connector->base;
5770 struct intel_dp *intel_dp = &intel_dig_port->dp;
5771 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5772 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5773 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5774 enum port port = intel_dig_port->port;
0b99836f 5775 int type;
a4fc5ed6 5776
a4a5d2f8
VS
5777 intel_dp->pps_pipe = INVALID_PIPE;
5778
ec5b01dd 5779 /* intel_dp vfuncs */
b6b5e383
DL
5780 if (INTEL_INFO(dev)->gen >= 9)
5781 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5782 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5783 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5784 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5785 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5786 else if (HAS_PCH_SPLIT(dev))
5787 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5788 else
5789 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5790
b9ca5fad
DL
5791 if (INTEL_INFO(dev)->gen >= 9)
5792 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5793 else
5794 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5795
ad64217b
ACO
5796 if (HAS_DDI(dev))
5797 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5798
0767935e
DV
5799 /* Preserve the current hw state. */
5800 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5801 intel_dp->attached_connector = intel_connector;
3d3dc149 5802
3b32a35b 5803 if (intel_dp_is_edp(dev, port))
b329530c 5804 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5805 else
5806 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5807
f7d24902
ID
5808 /*
5809 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5810 * for DP the encoder type can be set by the caller to
5811 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5812 */
5813 if (type == DRM_MODE_CONNECTOR_eDP)
5814 intel_encoder->type = INTEL_OUTPUT_EDP;
5815
c17ed5b5
VS
5816 /* eDP only on port B and/or C on vlv/chv */
5817 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5818 port != PORT_B && port != PORT_C))
5819 return false;
5820
e7281eab
ID
5821 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5822 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5823 port_name(port));
5824
b329530c 5825 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5826 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5827
a4fc5ed6
KP
5828 connector->interlace_allowed = true;
5829 connector->doublescan_allowed = 0;
5830
f0fec3f2 5831 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5832 edp_panel_vdd_work);
a4fc5ed6 5833
df0e9248 5834 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5835 drm_connector_register(connector);
a4fc5ed6 5836
affa9354 5837 if (HAS_DDI(dev))
bcbc889b
PZ
5838 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5839 else
5840 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5841 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5842
0b99836f 5843 /* Set up the hotplug pin. */
ab9d7c30
PZ
5844 switch (port) {
5845 case PORT_A:
1d843f9d 5846 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5847 break;
5848 case PORT_B:
1d843f9d 5849 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5850 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5851 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5852 break;
5853 case PORT_C:
1d843f9d 5854 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5855 break;
5856 case PORT_D:
1d843f9d 5857 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5858 break;
26951caf
XZ
5859 case PORT_E:
5860 intel_encoder->hpd_pin = HPD_PORT_E;
5861 break;
ab9d7c30 5862 default:
ad1c0b19 5863 BUG();
5eb08b69
ZW
5864 }
5865
dada1a9f 5866 if (is_edp(intel_dp)) {
773538e8 5867 pps_lock(intel_dp);
1e74a324
VS
5868 intel_dp_init_panel_power_timestamps(intel_dp);
5869 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5870 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5871 else
36b5f425 5872 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5873 pps_unlock(intel_dp);
dada1a9f 5874 }
0095e6dc 5875
9d1a1031 5876 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5877
0e32b39c 5878 /* init MST on ports that can support it */
0c9b3715
JN
5879 if (HAS_DP_MST(dev) &&
5880 (port == PORT_B || port == PORT_C || port == PORT_D))
5881 intel_dp_mst_encoder_init(intel_dig_port,
5882 intel_connector->base.base.id);
0e32b39c 5883
36b5f425 5884 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5885 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5886 if (is_edp(intel_dp)) {
5887 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5888 /*
5889 * vdd might still be enabled due to the delayed vdd off.
5890 * Make sure vdd is actually turned off here.
5891 */
773538e8 5892 pps_lock(intel_dp);
4be73780 5893 edp_panel_vdd_off_sync(intel_dp);
773538e8 5894 pps_unlock(intel_dp);
15b1d171 5895 }
34ea3d38 5896 drm_connector_unregister(connector);
b2f246a8 5897 drm_connector_cleanup(connector);
16c25533 5898 return false;
b2f246a8 5899 }
32f9d658 5900
f684960e
CW
5901 intel_dp_add_properties(intel_dp, connector);
5902
a4fc5ed6
KP
5903 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5904 * 0xd. Failure to do so will result in spurious interrupts being
5905 * generated on the port when a cable is not attached.
5906 */
5907 if (IS_G4X(dev) && !IS_GM45(dev)) {
5908 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5909 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5910 }
16c25533 5911
aa7471d2
JN
5912 i915_debugfs_connector_add(connector);
5913
16c25533 5914 return true;
a4fc5ed6 5915}
f0fec3f2
PZ
5916
5917void
5918intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5919{
13cf5504 5920 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5921 struct intel_digital_port *intel_dig_port;
5922 struct intel_encoder *intel_encoder;
5923 struct drm_encoder *encoder;
5924 struct intel_connector *intel_connector;
5925
b14c5679 5926 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5927 if (!intel_dig_port)
5928 return;
5929
08d9bc92 5930 intel_connector = intel_connector_alloc();
11aee0f6
SM
5931 if (!intel_connector)
5932 goto err_connector_alloc;
f0fec3f2
PZ
5933
5934 intel_encoder = &intel_dig_port->base;
5935 encoder = &intel_encoder->base;
5936
5937 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5938 DRM_MODE_ENCODER_TMDS);
5939
5bfe2ac0 5940 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5941 intel_encoder->disable = intel_disable_dp;
00c09d70 5942 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5943 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5944 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5945 if (IS_CHERRYVIEW(dev)) {
9197c88b 5946 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5947 intel_encoder->pre_enable = chv_pre_enable_dp;
5948 intel_encoder->enable = vlv_enable_dp;
580d3811 5949 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 5950 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 5951 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5952 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5953 intel_encoder->pre_enable = vlv_pre_enable_dp;
5954 intel_encoder->enable = vlv_enable_dp;
49277c31 5955 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5956 } else {
ecff4f3b
JN
5957 intel_encoder->pre_enable = g4x_pre_enable_dp;
5958 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5959 if (INTEL_INFO(dev)->gen >= 5)
5960 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5961 }
f0fec3f2 5962
174edf1f 5963 intel_dig_port->port = port;
f0fec3f2
PZ
5964 intel_dig_port->dp.output_reg = output_reg;
5965
00c09d70 5966 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5967 if (IS_CHERRYVIEW(dev)) {
5968 if (port == PORT_D)
5969 intel_encoder->crtc_mask = 1 << 2;
5970 else
5971 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5972 } else {
5973 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5974 }
bc079e8b 5975 intel_encoder->cloneable = 0;
f0fec3f2 5976
13cf5504 5977 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 5978 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 5979
11aee0f6
SM
5980 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5981 goto err_init_connector;
5982
5983 return;
5984
5985err_init_connector:
5986 drm_encoder_cleanup(encoder);
5987 kfree(intel_connector);
5988err_connector_alloc:
5989 kfree(intel_dig_port);
5990
5991 return;
f0fec3f2 5992}
0e32b39c
DA
5993
5994void intel_dp_mst_suspend(struct drm_device *dev)
5995{
5996 struct drm_i915_private *dev_priv = dev->dev_private;
5997 int i;
5998
5999 /* disable MST */
6000 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6001 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6002 if (!intel_dig_port)
6003 continue;
6004
6005 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6006 if (!intel_dig_port->dp.can_mst)
6007 continue;
6008 if (intel_dig_port->dp.is_mst)
6009 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6010 }
6011 }
6012}
6013
6014void intel_dp_mst_resume(struct drm_device *dev)
6015{
6016 struct drm_i915_private *dev_priv = dev->dev_private;
6017 int i;
6018
6019 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6020 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6021 if (!intel_dig_port)
6022 continue;
6023 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6024 int ret;
6025
6026 if (!intel_dig_port->dp.can_mst)
6027 continue;
6028
6029 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6030 if (ret != 0) {
6031 intel_dp_check_mst_status(&intel_dig_port->dp);
6032 }
6033 }
6034 }
6035}