/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
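/*
 * Illustrative note (added by the editor): for the 162000 entry above,
 * (32 << 22) | 1677722 == 0x819999a, which is exactly the m2 value stored
 * in the table.
 */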

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
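/*
 * Illustrative note (added by the editor): with lane_count == 2 the mask is
 * 0xc, i.e. lanes 2 and 3 of the four possible lanes are reported unused.
 */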

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 * 270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
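/*
 * Worked example (added by the editor): a 148500 kHz mode at 18bpp needs
 * intel_dp_link_required(148500, 18) == 267300, while a two-lane HBR link
 * supplies intel_dp_max_data_rate(270000, 2) == 432000, so it fits.
 */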

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
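/*
 * Illustrative note (added by the editor): intel_dp_pack_aux() packs bytes
 * MSB-first into the 32-bit AUX data register, so { 0x12, 0x34 } becomes
 * 0x12340000; intel_dp_unpack_aux() is its inverse.
 */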

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even the VDD force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
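/*
 * Editorial note: vlv_initial_power_sequencer_setup() below applies these
 * checks in priority order: a pipe whose panel is already on, then one with
 * VDD forced on, then any pipe whose PPS merely selects the port.
 */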

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee the T12 timing.
   Only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
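/*
 * Editorial note: unlike i9xx_get_aux_send_ctl() above, the SKL+ variant
 * programs no precharge count or 2x bit-clock divider, since the hardware
 * derives its own AUX timing from CDCLK; only the sync pulse count and a
 * longer timeout are set.
 */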

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organizes the retries at the drm level. There, EBUSY should
		 * also take care of the 1ms wait before retrying.
		 * That aux retry re-org is still needed, and once it is
		 * merged we can remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
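/*
 * Editorial note: the four header bytes built below (txbuf[0..3]) carry the
 * request nibble plus address bits 19:16, address bits 15:8, address bits
 * 7:0, and the transfer length minus one.
 */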
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_DATA(port, index);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}

static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
	else
		return g4x_aux_ctl_reg(dev_priv, port);
}

static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
	else
		return g4x_aux_data_reg(dev_priv, port, index);
}

static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}

static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
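/*
 * Editorial note: DP_LINK_BW_1_62, _2_7 and _5_4 are 0x06, 0x0a and 0x14,
 * so (max_link_bw >> 3) + 1 evaluates to 1, 2 or 3, i.e. how many entries
 * of default_rates[] the sink supports.
 */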

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;

	/* WaDisableHBR2:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
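/*
 * Editorial note: this is a sorted-merge intersection, so both arrays must
 * already be in ascending order, as the rate tables earlier in this file are.
 */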

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}
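/*
 * Editorial note: if 'find' is absent the loop runs off the end and
 * DP_MAX_SUPPORTED_RATES is returned. intel_dp_max_link_rate() below relies
 * on this by looking up 0 in a zero-initialized array, which yields the
 * number of populated entries.
 */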

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
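/*
 * Editorial note: sinks that expose a link-rate table (num_sink_rates != 0)
 * are programmed with a table index via rate_select and link_bw == 0;
 * otherwise the classic DP link-bw code is used and rate_select stays 0.
 */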

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift... */
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

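	/*
	 * Editorial note on the search below: bpp is walked from the highest
	 * acceptable value downwards, and for each bpp the link clock and
	 * lane count are walked upwards, so the first fit keeps the highest
	 * bpp while using the lowest clock and lane count that can carry it.
	 */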
36008365 1603 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1604 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1605 bpp);
36008365 1606
c6930992 1607 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1608 for (lane_count = min_lane_count;
1609 lane_count <= max_lane_count;
1610 lane_count <<= 1) {
1611
94ca719e 1612 link_clock = common_rates[clock];
36008365
DV
1613 link_avail = intel_dp_max_data_rate(link_clock,
1614 lane_count);
1615
1616 if (mode_rate <= link_avail) {
1617 goto found;
1618 }
1619 }
1620 }
1621 }
c4867936 1622
36008365 1623 return false;
3685a8f3 1624
36008365 1625found:
55bc60db
VS
1626 if (intel_dp->color_range_auto) {
1627 /*
1628 * See:
1629 * CEA-861-E - 5.1 Default Encoding Parameters
1630 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1631 */
0f2a2a75
VS
1632 pipe_config->limited_color_range =
1633 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1634 } else {
1635 pipe_config->limited_color_range =
1636 intel_dp->limited_color_range;
55bc60db
VS
1637 }
1638
90a6b7b0 1639 pipe_config->lane_count = lane_count;
a8f3ef61 1640
657445fe 1641 pipe_config->pipe_bpp = bpp;
94ca719e 1642 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1643
04a60f9f
VS
1644 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1645 &link_bw, &rate_select);
1646
1647 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1648 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1649 pipe_config->port_clock, bpp);
36008365
DV
1650 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1651 mode_rate, link_avail);
a4fc5ed6 1652
03afc4a2 1653 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1654 adjusted_mode->crtc_clock,
1655 pipe_config->port_clock,
03afc4a2 1656 &pipe_config->dp_m_n);
9d1a455b 1657
439d7ac0 1658 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1659 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1660 pipe_config->has_drrs = true;
439d7ac0
PB
1661 intel_link_compute_m_n(bpp, lane_count,
1662 intel_connector->panel.downclock_mode->clock,
1663 pipe_config->port_clock,
1664 &pipe_config->dp_m2_n2);
1665 }
1666
ef11bdb3 1667 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1668 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1669 else if (IS_BROXTON(dev))
1670 /* handled in ddi */;
5416d871 1671 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1672 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1673 else
840b32b7 1674 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1675
03afc4a2 1676 return true;
a4fc5ed6
KP
1677}
1678
901c2daf
VS
1679void intel_dp_set_link_params(struct intel_dp *intel_dp,
1680 const struct intel_crtc_state *pipe_config)
1681{
1682 intel_dp->link_rate = pipe_config->port_clock;
1683 intel_dp->lane_count = pipe_config->lane_count;
1684}
1685
8ac33ed3 1686static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1687{
b934223d 1688 struct drm_device *dev = encoder->base.dev;
417e822d 1689 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1690 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1691 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1692 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
7c5f93b0 1693 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1694
901c2daf
VS
1695 intel_dp_set_link_params(intel_dp, crtc->config);
1696
417e822d 1697 /*
1a2eb460 1698 * There are four kinds of DP registers:
417e822d
KP
1699 *
1700 * IBX PCH
1a2eb460
KP
1701 * SNB CPU
1702 * IVB CPU
417e822d
KP
1703 * CPT PCH
1704 *
1705 * IBX PCH and CPU are the same for almost everything,
1706 * except that the CPU DP PLL is configured in this
1707 * register
1708 *
1709 * CPT PCH is quite different, having many bits moved
1710 * to the TRANS_DP_CTL register instead. That
1711 * configuration happens (oddly) in ironlake_pch_enable
1712 */
9c9e7927 1713
417e822d
KP
1714 /* Preserve the BIOS-computed detected bit. This is
1715 * supposed to be read-only.
1716 */
1717 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1718
417e822d 1719 /* Handle DP bits in common between all three register formats */
417e822d 1720 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1721 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1722
417e822d 1723 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1724
39e5fa88 1725 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1726 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1727 intel_dp->DP |= DP_SYNC_HS_HIGH;
1728 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1729 intel_dp->DP |= DP_SYNC_VS_HIGH;
1730 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1731
6aba5b6c 1732 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1733 intel_dp->DP |= DP_ENHANCED_FRAMING;
1734
7c62a164 1735 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1736 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1737 u32 trans_dp;
1738
39e5fa88 1739 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1740
1741 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1742 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1743 trans_dp |= TRANS_DP_ENH_FRAMING;
1744 else
1745 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1746 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1747 } else {
0f2a2a75 1748 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 1749 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
0f2a2a75 1750 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1751
1752 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1753 intel_dp->DP |= DP_SYNC_HS_HIGH;
1754 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1755 intel_dp->DP |= DP_SYNC_VS_HIGH;
1756 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1757
6aba5b6c 1758 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1759 intel_dp->DP |= DP_ENHANCED_FRAMING;
1760
39e5fa88 1761 if (IS_CHERRYVIEW(dev))
44f37d1f 1762 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1763 else if (crtc->pipe == PIPE_B)
1764 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1765 }
a4fc5ed6
KP
1766}
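/*
 * Editor's note: a minimal sketch (not driver code) of the "common bits" part
 * of intel_dp_prepare() above: the DP register value is rebuilt on every
 * modeset, keeping only the read-only DETECTED bit and then OR-ing in the
 * lane count and sync polarities.  The SKETCH_* bit positions are invented
 * for illustration; only the shape of the logic follows the driver.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_DP_DETECTED	(1u << 2)
#define SKETCH_DP_PORT_WIDTH(l)	(((uint32_t)(l) - 1) << 19)
#define SKETCH_DP_SYNC_HS_HIGH	(1u << 3)
#define SKETCH_DP_SYNC_VS_HIGH	(1u << 4)

static uint32_t build_dp_reg(uint32_t old, int lane_count,
			     bool hsync_high, bool vsync_high)
{
	uint32_t dp = old & SKETCH_DP_DETECTED;	/* preserve BIOS-detected bit */

	dp |= SKETCH_DP_PORT_WIDTH(lane_count);
	if (hsync_high)
		dp |= SKETCH_DP_SYNC_HS_HIGH;
	if (vsync_high)
		dp |= SKETCH_DP_SYNC_VS_HIGH;
	return dp;
}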
1767
ffd6749d
PZ
1768#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1769#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1770
1a5ef5b7
PZ
1771#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1772#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1773
ffd6749d
PZ
1774#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1775#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1776
4be73780 1777static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1778 u32 mask,
1779 u32 value)
bd943159 1780{
30add22d 1781 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1782 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1783 i915_reg_t pp_stat_reg, pp_ctrl_reg;
453c5420 1784
e39b999a
VS
1785 lockdep_assert_held(&dev_priv->pps_mutex);
1786
bf13e81b
JN
1787 pp_stat_reg = _pp_stat_reg(intel_dp);
1788 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1789
99ea7127 1790 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1791 mask, value,
1792 I915_READ(pp_stat_reg),
1793 I915_READ(pp_ctrl_reg));
32ce697c 1794
453c5420 1795 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1796 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1797 I915_READ(pp_stat_reg),
1798 I915_READ(pp_ctrl_reg));
32ce697c 1799 }
54c136d4
CW
1800
1801 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1802}
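/*
 * Editor's note: illustrative userspace sketch (not driver code) of the
 * poll-with-timeout pattern that _wait_for() gives wait_panel_status() above:
 * keep re-reading a status word until (status & mask) == value, sleep between
 * polls, and report failure once the deadline passes.  read_status() is a
 * hypothetical stand-in for I915_READ(pp_stat_reg).
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static uint32_t read_status(void)
{
	/* stand-in for the PP_STATUS read; pretend the panel is already on */
	return 0x80000000u;
}

static bool wait_status(uint32_t mask, uint32_t value,
			long timeout_ms, unsigned int poll_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_status() & mask) == value)
			return true;		/* reached the wanted state */

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			return false;		/* timed out, like the DRM_ERROR path */

		usleep(poll_ms * 1000);		/* the driver call above passes (cond, 5000, 10) */
	}
}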
32ce697c 1803
4be73780 1804static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1805{
1806 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1807 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1808}
1809
4be73780 1810static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1811{
1812 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1813 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1814}
1815
4be73780 1816static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127 1817{
d28d4731
AK
1818 ktime_t panel_power_on_time;
1819 s64 panel_power_off_duration;
1820
99ea7127 1821 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c 1822
d28d4731
AK
 1823 /* take the difference of current time and panel power off time
 1824 * and then make the panel wait for t11_t12 if needed. */
1825 panel_power_on_time = ktime_get_boottime();
1826 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1827
dce56b3c
PZ
1828 /* When we disable the VDD override bit last we have to do the manual
1829 * wait. */
d28d4731
AK
1830 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1831 wait_remaining_ms_from_jiffies(jiffies,
1832 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
dce56b3c 1833
4be73780 1834 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1835}
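/*
 * Editor's note: small standalone sketch (not driver code) of the power-cycle
 * bookkeeping above: remember when the panel was last powered off and, on the
 * next power-on, sleep only for whatever part of the required T11/T12 delay
 * has not already elapsed.  CLOCK_MONOTONIC stands in loosely for
 * ktime_get_boottime(); the delay value is illustrative.
 */
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static long long panel_power_off_time_ms;	/* recorded when the panel goes off */

static void wait_panel_power_cycle_sketch(long long cycle_delay_ms)
{
	long long off_duration = now_ms() - panel_power_off_time_ms;

	if (off_duration < cycle_delay_ms)	/* only wait for the remainder */
		usleep((useconds_t)((cycle_delay_ms - off_duration) * 1000));
}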
1836
4be73780 1837static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1838{
1839 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1840 intel_dp->backlight_on_delay);
1841}
1842
4be73780 1843static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1844{
1845 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1846 intel_dp->backlight_off_delay);
1847}
99ea7127 1848
832dd3c1
KP
1849/* Read the current pp_control value, unlocking the register if it
1850 * is locked
1851 */
1852
453c5420 1853static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1854{
453c5420
JB
1855 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1856 struct drm_i915_private *dev_priv = dev->dev_private;
1857 u32 control;
832dd3c1 1858
e39b999a
VS
1859 lockdep_assert_held(&dev_priv->pps_mutex);
1860
bf13e81b 1861 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1862 if (!IS_BROXTON(dev)) {
1863 control &= ~PANEL_UNLOCK_MASK;
1864 control |= PANEL_UNLOCK_REGS;
1865 }
832dd3c1 1866 return control;
bd943159
KP
1867}
1868
951468f3
VS
1869/*
1870 * Must be paired with edp_panel_vdd_off().
1871 * Must hold pps_mutex around the whole on/off sequence.
1872 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1873 */
1e0560e0 1874static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1875{
30add22d 1876 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1877 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1878 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1879 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1880 enum intel_display_power_domain power_domain;
5d613501 1881 u32 pp;
f0f59a00 1882 i915_reg_t pp_stat_reg, pp_ctrl_reg;
adddaaf4 1883 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1884
e39b999a
VS
1885 lockdep_assert_held(&dev_priv->pps_mutex);
1886
97af61f5 1887 if (!is_edp(intel_dp))
adddaaf4 1888 return false;
bd943159 1889
2c623c11 1890 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1891 intel_dp->want_panel_vdd = true;
99ea7127 1892
4be73780 1893 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1894 return need_to_disable;
b0665d57 1895
25f78f58 1896 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 1897 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1898
3936fcf4
VS
1899 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1900 port_name(intel_dig_port->port));
bd943159 1901
4be73780
DV
1902 if (!edp_have_panel_power(intel_dp))
1903 wait_panel_power_cycle(intel_dp);
99ea7127 1904
453c5420 1905 pp = ironlake_get_pp_control(intel_dp);
5d613501 1906 pp |= EDP_FORCE_VDD;
ebf33b18 1907
bf13e81b
JN
1908 pp_stat_reg = _pp_stat_reg(intel_dp);
1909 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1910
1911 I915_WRITE(pp_ctrl_reg, pp);
1912 POSTING_READ(pp_ctrl_reg);
1913 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1914 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1915 /*
1916 * If the panel wasn't on, delay before accessing aux channel
1917 */
4be73780 1918 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1919 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1920 port_name(intel_dig_port->port));
f01eca2e 1921 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1922 }
adddaaf4
JN
1923
1924 return need_to_disable;
1925}
1926
951468f3
VS
1927/*
1928 * Must be paired with intel_edp_panel_vdd_off() or
1929 * intel_edp_panel_off().
1930 * Nested calls to these functions are not allowed since
1931 * we drop the lock. Caller must use some higher level
1932 * locking to prevent nested calls from other threads.
1933 */
b80d6c78 1934void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1935{
c695b6b6 1936 bool vdd;
adddaaf4 1937
c695b6b6
VS
1938 if (!is_edp(intel_dp))
1939 return;
1940
773538e8 1941 pps_lock(intel_dp);
c695b6b6 1942 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1943 pps_unlock(intel_dp);
c695b6b6 1944
e2c719b7 1945 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1946 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1947}
1948
4be73780 1949static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1950{
30add22d 1951 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1952 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1953 struct intel_digital_port *intel_dig_port =
1954 dp_to_dig_port(intel_dp);
1955 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1956 enum intel_display_power_domain power_domain;
5d613501 1957 u32 pp;
f0f59a00 1958 i915_reg_t pp_stat_reg, pp_ctrl_reg;
5d613501 1959
e39b999a 1960 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1961
15e899a0 1962 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1963
15e899a0 1964 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1965 return;
b0665d57 1966
3936fcf4
VS
1967 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1968 port_name(intel_dig_port->port));
bd943159 1969
be2c9196
VS
1970 pp = ironlake_get_pp_control(intel_dp);
1971 pp &= ~EDP_FORCE_VDD;
453c5420 1972
be2c9196
VS
1973 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1974 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1975
be2c9196
VS
1976 I915_WRITE(pp_ctrl_reg, pp);
1977 POSTING_READ(pp_ctrl_reg);
90791a5c 1978
be2c9196
VS
1979 /* Make sure sequencer is idle before allowing subsequent activity */
1980 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1981 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1982
be2c9196 1983 if ((pp & POWER_TARGET_ON) == 0)
d28d4731 1984 intel_dp->panel_power_off_time = ktime_get_boottime();
e9cb81a2 1985
25f78f58 1986 power_domain = intel_display_port_aux_power_domain(intel_encoder);
be2c9196 1987 intel_display_power_put(dev_priv, power_domain);
bd943159 1988}
5d613501 1989
4be73780 1990static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1991{
1992 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1993 struct intel_dp, panel_vdd_work);
bd943159 1994
773538e8 1995 pps_lock(intel_dp);
15e899a0
VS
1996 if (!intel_dp->want_panel_vdd)
1997 edp_panel_vdd_off_sync(intel_dp);
773538e8 1998 pps_unlock(intel_dp);
bd943159
KP
1999}
2000
aba86890
ID
2001static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2002{
2003 unsigned long delay;
2004
2005 /*
2006 * Queue the timer to fire a long time from now (relative to the power
2007 * down delay) to keep the panel power up across a sequence of
2008 * operations.
2009 */
2010 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2011 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2012}
2013
951468f3
VS
2014/*
2015 * Must be paired with edp_panel_vdd_on().
2016 * Must hold pps_mutex around the whole on/off sequence.
2017 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2018 */
4be73780 2019static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 2020{
e39b999a
VS
2021 struct drm_i915_private *dev_priv =
2022 intel_dp_to_dev(intel_dp)->dev_private;
2023
2024 lockdep_assert_held(&dev_priv->pps_mutex);
2025
97af61f5
KP
2026 if (!is_edp(intel_dp))
2027 return;
5d613501 2028
e2c719b7 2029 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 2030 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 2031
bd943159
KP
2032 intel_dp->want_panel_vdd = false;
2033
aba86890 2034 if (sync)
4be73780 2035 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
2036 else
2037 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
2038}
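/*
 * Editor's note: toy model (not driver code) of the vdd pairing contract in
 * the comments above.  edp_panel_vdd_on() tells the caller whether *it* was
 * the one that turned vdd on; only that caller should later trigger the off
 * path, so nested users don't pull vdd from under each other.  Callers in the
 * driver typically look roughly like "vdd = edp_panel_vdd_on(dp); ...
 * if (vdd) edp_panel_vdd_off(dp, false);".
 */
#include <stdbool.h>

struct vdd_model {
	bool want_vdd;		/* someone has asked for vdd (want_panel_vdd) */
	bool have_vdd;		/* the EDP_FORCE_VDD override is actually set */
};

static bool model_vdd_on(struct vdd_model *m)
{
	bool need_to_disable = !m->want_vdd;	/* are we the first requester? */

	m->want_vdd = true;
	m->have_vdd = true;	/* the driver writes EDP_FORCE_VDD here */
	return need_to_disable;
}

static void model_vdd_off(struct vdd_model *m)
{
	m->want_vdd = false;
	m->have_vdd = false;	/* the driver clears EDP_FORCE_VDD, possibly delayed */
}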
2039
9f0fb5be 2040static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 2041{
30add22d 2042 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2043 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 2044 u32 pp;
f0f59a00 2045 i915_reg_t pp_ctrl_reg;
9934c132 2046
9f0fb5be
VS
2047 lockdep_assert_held(&dev_priv->pps_mutex);
2048
97af61f5 2049 if (!is_edp(intel_dp))
bd943159 2050 return;
99ea7127 2051
3936fcf4
VS
2052 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2053 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 2054
e7a89ace
VS
2055 if (WARN(edp_have_panel_power(intel_dp),
2056 "eDP port %c panel power already on\n",
2057 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 2058 return;
9934c132 2059
4be73780 2060 wait_panel_power_cycle(intel_dp);
37c6c9b0 2061
bf13e81b 2062 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2063 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
2064 if (IS_GEN5(dev)) {
2065 /* ILK workaround: disable reset around power sequence */
2066 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
2067 I915_WRITE(pp_ctrl_reg, pp);
2068 POSTING_READ(pp_ctrl_reg);
05ce1a49 2069 }
37c6c9b0 2070
1c0ae80a 2071 pp |= POWER_TARGET_ON;
99ea7127
KP
2072 if (!IS_GEN5(dev))
2073 pp |= PANEL_POWER_RESET;
2074
453c5420
JB
2075 I915_WRITE(pp_ctrl_reg, pp);
2076 POSTING_READ(pp_ctrl_reg);
9934c132 2077
4be73780 2078 wait_panel_on(intel_dp);
dce56b3c 2079 intel_dp->last_power_on = jiffies;
9934c132 2080
05ce1a49
KP
2081 if (IS_GEN5(dev)) {
2082 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
2083 I915_WRITE(pp_ctrl_reg, pp);
2084 POSTING_READ(pp_ctrl_reg);
05ce1a49 2085 }
9f0fb5be 2086}
e39b999a 2087
9f0fb5be
VS
2088void intel_edp_panel_on(struct intel_dp *intel_dp)
2089{
2090 if (!is_edp(intel_dp))
2091 return;
2092
2093 pps_lock(intel_dp);
2094 edp_panel_on(intel_dp);
773538e8 2095 pps_unlock(intel_dp);
9934c132
JB
2096}
2097
9f0fb5be
VS
2098
2099static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2100{
4e6e1a54
ID
2101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2102 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2103 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2104 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2105 enum intel_display_power_domain power_domain;
99ea7127 2106 u32 pp;
f0f59a00 2107 i915_reg_t pp_ctrl_reg;
9934c132 2108
9f0fb5be
VS
2109 lockdep_assert_held(&dev_priv->pps_mutex);
2110
97af61f5
KP
2111 if (!is_edp(intel_dp))
2112 return;
37c6c9b0 2113
3936fcf4
VS
2114 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2115 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2116
3936fcf4
VS
2117 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2118 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2119
453c5420 2120 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2121 /* We need to switch off panel power _and_ force vdd, for otherwise some
2122 * panels get very unhappy and cease to work. */
b3064154
PJ
2123 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2124 EDP_BLC_ENABLE);
453c5420 2125
bf13e81b 2126 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2127
849e39f5
PZ
2128 intel_dp->want_panel_vdd = false;
2129
453c5420
JB
2130 I915_WRITE(pp_ctrl_reg, pp);
2131 POSTING_READ(pp_ctrl_reg);
9934c132 2132
d28d4731 2133 intel_dp->panel_power_off_time = ktime_get_boottime();
4be73780 2134 wait_panel_off(intel_dp);
849e39f5
PZ
2135
2136 /* We got a reference when we enabled the VDD. */
25f78f58 2137 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 2138 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2139}
e39b999a 2140
9f0fb5be
VS
2141void intel_edp_panel_off(struct intel_dp *intel_dp)
2142{
2143 if (!is_edp(intel_dp))
2144 return;
e39b999a 2145
9f0fb5be
VS
2146 pps_lock(intel_dp);
2147 edp_panel_off(intel_dp);
773538e8 2148 pps_unlock(intel_dp);
9934c132
JB
2149}
2150
1250d107
JN
2151/* Enable backlight in the panel power control. */
2152static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2153{
da63a9f2
PZ
2154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2155 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2156 struct drm_i915_private *dev_priv = dev->dev_private;
2157 u32 pp;
f0f59a00 2158 i915_reg_t pp_ctrl_reg;
32f9d658 2159
01cb9ea6
JB
2160 /*
2161 * If we enable the backlight right away following a panel power
2162 * on, we may see slight flicker as the panel syncs with the eDP
2163 * link. So delay a bit to make sure the image is solid before
2164 * allowing it to appear.
2165 */
4be73780 2166 wait_backlight_on(intel_dp);
e39b999a 2167
773538e8 2168 pps_lock(intel_dp);
e39b999a 2169
453c5420 2170 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2171 pp |= EDP_BLC_ENABLE;
453c5420 2172
bf13e81b 2173 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2174
2175 I915_WRITE(pp_ctrl_reg, pp);
2176 POSTING_READ(pp_ctrl_reg);
e39b999a 2177
773538e8 2178 pps_unlock(intel_dp);
32f9d658
ZW
2179}
2180
1250d107
JN
2181/* Enable backlight PWM and backlight PP control. */
2182void intel_edp_backlight_on(struct intel_dp *intel_dp)
2183{
2184 if (!is_edp(intel_dp))
2185 return;
2186
2187 DRM_DEBUG_KMS("\n");
2188
2189 intel_panel_enable_backlight(intel_dp->attached_connector);
2190 _intel_edp_backlight_on(intel_dp);
2191}
2192
2193/* Disable backlight in the panel power control. */
2194static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2195{
30add22d 2196 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2197 struct drm_i915_private *dev_priv = dev->dev_private;
2198 u32 pp;
f0f59a00 2199 i915_reg_t pp_ctrl_reg;
32f9d658 2200
f01eca2e
KP
2201 if (!is_edp(intel_dp))
2202 return;
2203
773538e8 2204 pps_lock(intel_dp);
e39b999a 2205
453c5420 2206 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2207 pp &= ~EDP_BLC_ENABLE;
453c5420 2208
bf13e81b 2209 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2210
2211 I915_WRITE(pp_ctrl_reg, pp);
2212 POSTING_READ(pp_ctrl_reg);
f7d2323c 2213
773538e8 2214 pps_unlock(intel_dp);
e39b999a
VS
2215
2216 intel_dp->last_backlight_off = jiffies;
f7d2323c 2217 edp_wait_backlight_off(intel_dp);
1250d107 2218}
f7d2323c 2219
1250d107
JN
2220/* Disable backlight PP control and backlight PWM. */
2221void intel_edp_backlight_off(struct intel_dp *intel_dp)
2222{
2223 if (!is_edp(intel_dp))
2224 return;
2225
2226 DRM_DEBUG_KMS("\n");
f7d2323c 2227
1250d107 2228 _intel_edp_backlight_off(intel_dp);
f7d2323c 2229 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2230}
a4fc5ed6 2231
73580fb7
JN
2232/*
2233 * Hook for controlling the panel power control backlight through the bl_power
2234 * sysfs attribute. Take care to handle multiple calls.
2235 */
2236static void intel_edp_backlight_power(struct intel_connector *connector,
2237 bool enable)
2238{
2239 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2240 bool is_enabled;
2241
773538e8 2242 pps_lock(intel_dp);
e39b999a 2243 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2244 pps_unlock(intel_dp);
73580fb7
JN
2245
2246 if (is_enabled == enable)
2247 return;
2248
23ba9373
JN
2249 DRM_DEBUG_KMS("panel power control backlight %s\n",
2250 enable ? "enable" : "disable");
73580fb7
JN
2251
2252 if (enable)
2253 _intel_edp_backlight_on(intel_dp);
2254 else
2255 _intel_edp_backlight_off(intel_dp);
2256}
2257
64e1077a
VS
2258static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2259{
2260 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2261 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2262 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2263
2264 I915_STATE_WARN(cur_state != state,
2265 "DP port %c state assertion failure (expected %s, current %s)\n",
2266 port_name(dig_port->port),
87ad3212 2267 onoff(state), onoff(cur_state));
64e1077a
VS
2268}
2269#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2270
2271static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2272{
2273 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2274
2275 I915_STATE_WARN(cur_state != state,
2276 "eDP PLL state assertion failure (expected %s, current %s)\n",
87ad3212 2277 onoff(state), onoff(cur_state));
64e1077a
VS
2278}
2279#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2280#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2281
2bd2ad64 2282static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2283{
da63a9f2 2284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2285 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2286 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2287
64e1077a
VS
2288 assert_pipe_disabled(dev_priv, crtc->pipe);
2289 assert_dp_port_disabled(intel_dp);
2290 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2291
abfce949
VS
2292 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2293 crtc->config->port_clock);
2294
2295 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2296
2297 if (crtc->config->port_clock == 162000)
2298 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2299 else
2300 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2301
2302 I915_WRITE(DP_A, intel_dp->DP);
2303 POSTING_READ(DP_A);
2304 udelay(500);
2305
0767935e 2306 intel_dp->DP |= DP_PLL_ENABLE;
6fec7662 2307
0767935e 2308 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2309 POSTING_READ(DP_A);
2310 udelay(200);
d240f20f
JB
2311}
2312
2bd2ad64 2313static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2314{
da63a9f2 2315 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2316 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2317 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2318
64e1077a
VS
2319 assert_pipe_disabled(dev_priv, crtc->pipe);
2320 assert_dp_port_disabled(intel_dp);
2321 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2322
abfce949
VS
2323 DRM_DEBUG_KMS("disabling eDP PLL\n");
2324
6fec7662 2325 intel_dp->DP &= ~DP_PLL_ENABLE;
0767935e 2326
6fec7662 2327 I915_WRITE(DP_A, intel_dp->DP);
1af5fa1b 2328 POSTING_READ(DP_A);
d240f20f
JB
2329 udelay(200);
2330}
2331
c7ad3810 2332/* If the sink supports it, try to set the power state appropriately */
c19b0669 2333void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2334{
2335 int ret, i;
2336
2337 /* Should have a valid DPCD by this point */
2338 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2339 return;
2340
2341 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2342 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2343 DP_SET_POWER_D3);
c7ad3810
JB
2344 } else {
2345 /*
2346 * When turning on, we need to retry for 1ms to give the sink
2347 * time to wake up.
2348 */
2349 for (i = 0; i < 3; i++) {
9d1a1031
JN
2350 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2351 DP_SET_POWER_D0);
c7ad3810
JB
2352 if (ret == 1)
2353 break;
2354 msleep(1);
2355 }
2356 }
f9cac721
JN
2357
2358 if (ret != 1)
2359 DRM_DEBUG_KMS("failed to %s sink power state\n",
2360 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2361}
2362
19d8fe15
DV
2363static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2364 enum pipe *pipe)
d240f20f 2365{
19d8fe15 2366 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2367 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2368 struct drm_device *dev = encoder->base.dev;
2369 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2370 enum intel_display_power_domain power_domain;
2371 u32 tmp;
6fa9a5ec 2372 bool ret;
6d129bea
ID
2373
2374 power_domain = intel_display_port_power_domain(encoder);
6fa9a5ec 2375 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
6d129bea
ID
2376 return false;
2377
6fa9a5ec
ID
2378 ret = false;
2379
6d129bea 2380 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2381
2382 if (!(tmp & DP_PORT_EN))
6fa9a5ec 2383 goto out;
19d8fe15 2384
39e5fa88 2385 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2386 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2387 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2388 enum pipe p;
19d8fe15 2389
adc289d7
VS
2390 for_each_pipe(dev_priv, p) {
2391 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2392 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2393 *pipe = p;
6fa9a5ec
ID
2394 ret = true;
2395
2396 goto out;
19d8fe15
DV
2397 }
2398 }
19d8fe15 2399
4a0833ec 2400 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
f0f59a00 2401 i915_mmio_reg_offset(intel_dp->output_reg));
39e5fa88
VS
2402 } else if (IS_CHERRYVIEW(dev)) {
2403 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2404 } else {
2405 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2406 }
d240f20f 2407
6fa9a5ec
ID
2408 ret = true;
2409
2410out:
2411 intel_display_power_put(dev_priv, power_domain);
2412
2413 return ret;
19d8fe15 2414}
d240f20f 2415
045ac3b5 2416static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2417 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2418{
2419 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2420 u32 tmp, flags = 0;
63000ef6
XZ
2421 struct drm_device *dev = encoder->base.dev;
2422 struct drm_i915_private *dev_priv = dev->dev_private;
2423 enum port port = dp_to_dig_port(intel_dp)->port;
2424 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2425 int dotclock;
045ac3b5 2426
9ed109a7 2427 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2428
2429 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2430
39e5fa88 2431 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2432 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2433
2434 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2435 flags |= DRM_MODE_FLAG_PHSYNC;
2436 else
2437 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2438
b81e34c2 2439 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2440 flags |= DRM_MODE_FLAG_PVSYNC;
2441 else
2442 flags |= DRM_MODE_FLAG_NVSYNC;
2443 } else {
39e5fa88 2444 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2445 flags |= DRM_MODE_FLAG_PHSYNC;
2446 else
2447 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2448
39e5fa88 2449 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2450 flags |= DRM_MODE_FLAG_PVSYNC;
2451 else
2452 flags |= DRM_MODE_FLAG_NVSYNC;
2453 }
045ac3b5 2454
2d112de7 2455 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2456
8c875fca 2457 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 2458 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
8c875fca
VS
2459 pipe_config->limited_color_range = true;
2460
eb14cb74
VS
2461 pipe_config->has_dp_encoder = true;
2462
90a6b7b0
VS
2463 pipe_config->lane_count =
2464 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2465
eb14cb74
VS
2466 intel_dp_get_m_n(crtc, pipe_config);
2467
18442d08 2468 if (port == PORT_A) {
b377e0df 2469 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2470 pipe_config->port_clock = 162000;
2471 else
2472 pipe_config->port_clock = 270000;
2473 }
18442d08
VS
2474
2475 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2476 &pipe_config->dp_m_n);
2477
2478 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2479 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2480
2d112de7 2481 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2482
c6cd2ee2
JN
2483 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2484 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2485 /*
2486 * This is a big fat ugly hack.
2487 *
2488 * Some machines in UEFI boot mode provide us a VBT that has 18
2489 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2490 * unknown we fail to light up. Yet the same BIOS boots up with
2491 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2492 * max, not what it tells us to use.
2493 *
2494 * Note: This will still be broken if the eDP panel is not lit
2495 * up by the BIOS, and thus we can't get the mode at module
2496 * load.
2497 */
2498 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2499 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2500 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2501 }
045ac3b5
JB
2502}
2503
e8cb4558 2504static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2505{
e8cb4558 2506 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2507 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2508 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2509
6e3c9717 2510 if (crtc->config->has_audio)
495a5bb8 2511 intel_audio_codec_disable(encoder);
6cb49835 2512
b32c6f48
RV
2513 if (HAS_PSR(dev) && !HAS_DDI(dev))
2514 intel_psr_disable(intel_dp);
2515
6cb49835
DV
2516 /* Make sure the panel is off before trying to change the mode. But also
2517 * ensure that we have vdd while we switch off the panel. */
24f3e092 2518 intel_edp_panel_vdd_on(intel_dp);
4be73780 2519 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2520 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2521 intel_edp_panel_off(intel_dp);
3739850b 2522
08aff3fe
VS
2523 /* disable the port before the pipe on g4x */
2524 if (INTEL_INFO(dev)->gen < 5)
3739850b 2525 intel_dp_link_down(intel_dp);
d240f20f
JB
2526}
2527
08aff3fe 2528static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2529{
2bd2ad64 2530 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2531 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2532
49277c31 2533 intel_dp_link_down(intel_dp);
abfce949
VS
2534
2535 /* Only ilk+ has port A */
08aff3fe
VS
2536 if (port == PORT_A)
2537 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2538}
2539
2540static void vlv_post_disable_dp(struct intel_encoder *encoder)
2541{
2542 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2543
2544 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2545}
2546
a8f327fb
VS
2547static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2548 bool reset)
580d3811 2549{
a8f327fb
VS
2550 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2551 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2552 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2553 enum pipe pipe = crtc->pipe;
2554 uint32_t val;
580d3811 2555
a8f327fb
VS
2556 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2557 if (reset)
2558 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2559 else
2560 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2561 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2562
a8f327fb
VS
2563 if (crtc->config->lane_count > 2) {
2564 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2565 if (reset)
2566 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2567 else
2568 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2569 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2570 }
580d3811 2571
97fd4d5c 2572 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2573 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2574 if (reset)
2575 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2576 else
2577 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2578 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2579
a8f327fb 2580 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2581 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2582 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2583 if (reset)
2584 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2585 else
2586 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2587 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2588 }
a8f327fb 2589}
97fd4d5c 2590
a8f327fb
VS
2591static void chv_post_disable_dp(struct intel_encoder *encoder)
2592{
2593 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2594 struct drm_device *dev = encoder->base.dev;
2595 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2596
a8f327fb
VS
2597 intel_dp_link_down(intel_dp);
2598
2599 mutex_lock(&dev_priv->sb_lock);
2600
2601 /* Assert data lane reset */
2602 chv_data_lane_soft_reset(encoder, true);
580d3811 2603
a580516d 2604 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2605}
2606
7b13b58a
VS
2607static void
2608_intel_dp_set_link_train(struct intel_dp *intel_dp,
2609 uint32_t *DP,
2610 uint8_t dp_train_pat)
2611{
2612 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2613 struct drm_device *dev = intel_dig_port->base.base.dev;
2614 struct drm_i915_private *dev_priv = dev->dev_private;
2615 enum port port = intel_dig_port->port;
2616
2617 if (HAS_DDI(dev)) {
2618 uint32_t temp = I915_READ(DP_TP_CTL(port));
2619
2620 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2621 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2622 else
2623 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2624
2625 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2626 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2627 case DP_TRAINING_PATTERN_DISABLE:
2628 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2629
2630 break;
2631 case DP_TRAINING_PATTERN_1:
2632 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2633 break;
2634 case DP_TRAINING_PATTERN_2:
2635 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2636 break;
2637 case DP_TRAINING_PATTERN_3:
2638 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2639 break;
2640 }
2641 I915_WRITE(DP_TP_CTL(port), temp);
2642
39e5fa88
VS
2643 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2644 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2645 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2646
2647 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2648 case DP_TRAINING_PATTERN_DISABLE:
2649 *DP |= DP_LINK_TRAIN_OFF_CPT;
2650 break;
2651 case DP_TRAINING_PATTERN_1:
2652 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2653 break;
2654 case DP_TRAINING_PATTERN_2:
2655 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2656 break;
2657 case DP_TRAINING_PATTERN_3:
2658 DRM_ERROR("DP training pattern 3 not supported\n");
2659 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2660 break;
2661 }
2662
2663 } else {
2664 if (IS_CHERRYVIEW(dev))
2665 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2666 else
2667 *DP &= ~DP_LINK_TRAIN_MASK;
2668
2669 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2670 case DP_TRAINING_PATTERN_DISABLE:
2671 *DP |= DP_LINK_TRAIN_OFF;
2672 break;
2673 case DP_TRAINING_PATTERN_1:
2674 *DP |= DP_LINK_TRAIN_PAT_1;
2675 break;
2676 case DP_TRAINING_PATTERN_2:
2677 *DP |= DP_LINK_TRAIN_PAT_2;
2678 break;
2679 case DP_TRAINING_PATTERN_3:
2680 if (IS_CHERRYVIEW(dev)) {
2681 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2682 } else {
2683 DRM_ERROR("DP training pattern 3 not supported\n");
2684 *DP |= DP_LINK_TRAIN_PAT_2;
2685 }
2686 break;
2687 }
2688 }
2689}
2690
2691static void intel_dp_enable_port(struct intel_dp *intel_dp)
2692{
2693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2694 struct drm_i915_private *dev_priv = dev->dev_private;
6fec7662
VS
2695 struct intel_crtc *crtc =
2696 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
7b13b58a 2697
7b13b58a
VS
2698 /* enable with pattern 1 (as per spec) */
2699 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2700 DP_TRAINING_PATTERN_1);
2701
2702 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2703 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2704
2705 /*
2706 * Magic for VLV/CHV. We _must_ first set up the register
2707 * without actually enabling the port, and then do another
2708 * write to enable the port. Otherwise link training will
2709 * fail when the power sequencer is freshly used for this port.
2710 */
2711 intel_dp->DP |= DP_PORT_EN;
6fec7662
VS
2712 if (crtc->config->has_audio)
2713 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50
VS
2714
2715 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2716 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2717}
2718
e8cb4558 2719static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2720{
e8cb4558
DV
2721 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2722 struct drm_device *dev = encoder->base.dev;
2723 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2724 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2725 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2726 enum port port = dp_to_dig_port(intel_dp)->port;
2727 enum pipe pipe = crtc->pipe;
5d613501 2728
0c33d8d7
DV
2729 if (WARN_ON(dp_reg & DP_PORT_EN))
2730 return;
5d613501 2731
093e3f13
VS
2732 pps_lock(intel_dp);
2733
666a4537 2734 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
093e3f13
VS
2735 vlv_init_panel_power_sequencer(intel_dp);
2736
7864578a
VS
2737 /*
2738 * We get an occasional spurious underrun between the port
2739 * enable and vdd enable, when enabling port A eDP.
2740 *
2741 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2742 */
2743 if (port == PORT_A)
2744 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2745
7b13b58a 2746 intel_dp_enable_port(intel_dp);
093e3f13 2747
d6fbdd15
VS
2748 if (port == PORT_A && IS_GEN5(dev_priv)) {
2749 /*
2750 * Underrun reporting for the other pipe was disabled in
2751 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2752 * enabled, so it's now safe to re-enable underrun reporting.
2753 */
2754 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2755 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2756 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2757 }
2758
093e3f13
VS
2759 edp_panel_vdd_on(intel_dp);
2760 edp_panel_on(intel_dp);
2761 edp_panel_vdd_off(intel_dp, true);
2762
7864578a
VS
2763 if (port == PORT_A)
2764 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2765
093e3f13
VS
2766 pps_unlock(intel_dp);
2767
666a4537 2768 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e0fce78f
VS
2769 unsigned int lane_mask = 0x0;
2770
2771 if (IS_CHERRYVIEW(dev))
2772 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2773
9b6de0a1
VS
2774 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2775 lane_mask);
e0fce78f 2776 }
61234fa5 2777
f01eca2e 2778 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2779 intel_dp_start_link_train(intel_dp);
3ab9c637 2780 intel_dp_stop_link_train(intel_dp);
c1dec79a 2781
6e3c9717 2782 if (crtc->config->has_audio) {
c1dec79a 2783 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2784 pipe_name(pipe));
c1dec79a
JN
2785 intel_audio_codec_enable(encoder);
2786 }
ab1f90f9 2787}
89b667f8 2788
ecff4f3b
JN
2789static void g4x_enable_dp(struct intel_encoder *encoder)
2790{
828f5c6e
JN
2791 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2792
ecff4f3b 2793 intel_enable_dp(encoder);
4be73780 2794 intel_edp_backlight_on(intel_dp);
ab1f90f9 2795}
89b667f8 2796
ab1f90f9
JN
2797static void vlv_enable_dp(struct intel_encoder *encoder)
2798{
828f5c6e
JN
2799 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2800
4be73780 2801 intel_edp_backlight_on(intel_dp);
b32c6f48 2802 intel_psr_enable(intel_dp);
d240f20f
JB
2803}
2804
ecff4f3b 2805static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2806{
d6fbdd15 2807 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2808 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2809 enum port port = dp_to_dig_port(intel_dp)->port;
2810 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2811
8ac33ed3
DV
2812 intel_dp_prepare(encoder);
2813
d6fbdd15
VS
2814 if (port == PORT_A && IS_GEN5(dev_priv)) {
2815 /*
2816 * We get FIFO underruns on the other pipe when
2817 * enabling the CPU eDP PLL, and when enabling CPU
2818 * eDP port. We could potentially avoid the PLL
2819 * underrun with a vblank wait just prior to enabling
2820 * the PLL, but that doesn't appear to help the port
2821 * enable case. Just sweep it all under the rug.
2822 */
2823 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2824 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2825 }
2826
d41f1efb 2827 /* Only ilk+ has port A */
abfce949 2828 if (port == PORT_A)
ab1f90f9
JN
2829 ironlake_edp_pll_on(intel_dp);
2830}
2831
83b84597
VS
2832static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2833{
2834 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2835 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2836 enum pipe pipe = intel_dp->pps_pipe;
f0f59a00 2837 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
83b84597
VS
2838
2839 edp_panel_vdd_off_sync(intel_dp);
2840
2841 /*
 2842 * VLV seems to get confused when multiple power sequencers
 2843 * have the same port selected (even if only one has power/vdd
 2844 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2845 * CHV, on the other hand, doesn't seem to mind having the same port
 2846 * selected in multiple power sequencers, but let's clear the
2847 * port select always when logically disconnecting a power sequencer
2848 * from a port.
2849 */
2850 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2851 pipe_name(pipe), port_name(intel_dig_port->port));
2852 I915_WRITE(pp_on_reg, 0);
2853 POSTING_READ(pp_on_reg);
2854
2855 intel_dp->pps_pipe = INVALID_PIPE;
2856}
2857
a4a5d2f8
VS
2858static void vlv_steal_power_sequencer(struct drm_device *dev,
2859 enum pipe pipe)
2860{
2861 struct drm_i915_private *dev_priv = dev->dev_private;
2862 struct intel_encoder *encoder;
2863
2864 lockdep_assert_held(&dev_priv->pps_mutex);
2865
ac3c12e4
VS
2866 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2867 return;
2868
19c8054c 2869 for_each_intel_encoder(dev, encoder) {
a4a5d2f8 2870 struct intel_dp *intel_dp;
773538e8 2871 enum port port;
a4a5d2f8
VS
2872
2873 if (encoder->type != INTEL_OUTPUT_EDP)
2874 continue;
2875
2876 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2877 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2878
2879 if (intel_dp->pps_pipe != pipe)
2880 continue;
2881
2882 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2883 pipe_name(pipe), port_name(port));
a4a5d2f8 2884
e02f9a06 2885 WARN(encoder->base.crtc,
034e43c6
VS
2886 "stealing pipe %c power sequencer from active eDP port %c\n",
2887 pipe_name(pipe), port_name(port));
a4a5d2f8 2888
a4a5d2f8 2889 /* make sure vdd is off before we steal it */
83b84597 2890 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2891 }
2892}
2893
2894static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2895{
2896 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2897 struct intel_encoder *encoder = &intel_dig_port->base;
2898 struct drm_device *dev = encoder->base.dev;
2899 struct drm_i915_private *dev_priv = dev->dev_private;
2900 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2901
2902 lockdep_assert_held(&dev_priv->pps_mutex);
2903
093e3f13
VS
2904 if (!is_edp(intel_dp))
2905 return;
2906
a4a5d2f8
VS
2907 if (intel_dp->pps_pipe == crtc->pipe)
2908 return;
2909
2910 /*
2911 * If another power sequencer was being used on this
2912 * port previously make sure to turn off vdd there while
2913 * we still have control of it.
2914 */
2915 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2916 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2917
2918 /*
2919 * We may be stealing the power
2920 * sequencer from another port.
2921 */
2922 vlv_steal_power_sequencer(dev, crtc->pipe);
2923
2924 /* now it's all ours */
2925 intel_dp->pps_pipe = crtc->pipe;
2926
2927 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2928 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2929
2930 /* init power sequencer on this pipe and port */
36b5f425
VS
2931 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2932 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2933}
2934
ab1f90f9 2935static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2936{
2bd2ad64 2937 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2938 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2939 struct drm_device *dev = encoder->base.dev;
89b667f8 2940 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2941 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2942 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2943 int pipe = intel_crtc->pipe;
2944 u32 val;
a4fc5ed6 2945
a580516d 2946 mutex_lock(&dev_priv->sb_lock);
89b667f8 2947
ab3c759a 2948 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2949 val = 0;
2950 if (pipe)
2951 val |= (1<<21);
2952 else
2953 val &= ~(1<<21);
2954 val |= 0x001000c4;
ab3c759a
CML
2955 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2956 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2957 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2958
a580516d 2959 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2960
2961 intel_enable_dp(encoder);
89b667f8
JB
2962}
2963
ecff4f3b 2964static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2965{
2966 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2967 struct drm_device *dev = encoder->base.dev;
2968 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2969 struct intel_crtc *intel_crtc =
2970 to_intel_crtc(encoder->base.crtc);
e4607fcf 2971 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2972 int pipe = intel_crtc->pipe;
89b667f8 2973
8ac33ed3
DV
2974 intel_dp_prepare(encoder);
2975
89b667f8 2976 /* Program Tx lane resets to default */
a580516d 2977 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2978 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2979 DPIO_PCS_TX_LANE2_RESET |
2980 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2981 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2982 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2983 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2984 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2985 DPIO_PCS_CLK_SOFT_RESET);
2986
2987 /* Fix up inter-pair skew failure */
ab3c759a
CML
2988 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2989 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2990 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2991 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2992}
2993
e4a1d846
CML
2994static void chv_pre_enable_dp(struct intel_encoder *encoder)
2995{
2996 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2997 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2998 struct drm_device *dev = encoder->base.dev;
2999 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
3000 struct intel_crtc *intel_crtc =
3001 to_intel_crtc(encoder->base.crtc);
3002 enum dpio_channel ch = vlv_dport_to_channel(dport);
3003 int pipe = intel_crtc->pipe;
2e523e98 3004 int data, i, stagger;
949c1d43 3005 u32 val;
e4a1d846 3006
a580516d 3007 mutex_lock(&dev_priv->sb_lock);
949c1d43 3008
570e2a74
VS
3009 /* allow hardware to manage TX FIFO reset source */
3010 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3011 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3012 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3013
e0fce78f
VS
3014 if (intel_crtc->config->lane_count > 2) {
3015 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3016 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3017 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3018 }
570e2a74 3019
949c1d43 3020 /* Program Tx lane latency optimal setting*/
e0fce78f 3021 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 3022 /* Set the upar bit */
e0fce78f
VS
3023 if (intel_crtc->config->lane_count == 1)
3024 data = 0x0;
3025 else
3026 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
3027 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3028 data << DPIO_UPAR_SHIFT);
3029 }
3030
3031 /* Data lane stagger programming */
2e523e98
VS
3032 if (intel_crtc->config->port_clock > 270000)
3033 stagger = 0x18;
3034 else if (intel_crtc->config->port_clock > 135000)
3035 stagger = 0xd;
3036 else if (intel_crtc->config->port_clock > 67500)
3037 stagger = 0x7;
3038 else if (intel_crtc->config->port_clock > 33750)
3039 stagger = 0x4;
3040 else
3041 stagger = 0x2;
3042
3043 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3044 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3045 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3046
e0fce78f
VS
3047 if (intel_crtc->config->lane_count > 2) {
3048 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3049 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3050 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3051 }
2e523e98
VS
3052
3053 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3054 DPIO_LANESTAGGER_STRAP(stagger) |
3055 DPIO_LANESTAGGER_STRAP_OVRD |
3056 DPIO_TX1_STAGGER_MASK(0x1f) |
3057 DPIO_TX1_STAGGER_MULT(6) |
3058 DPIO_TX2_STAGGER_MULT(0));
3059
e0fce78f
VS
3060 if (intel_crtc->config->lane_count > 2) {
3061 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3062 DPIO_LANESTAGGER_STRAP(stagger) |
3063 DPIO_LANESTAGGER_STRAP_OVRD |
3064 DPIO_TX1_STAGGER_MASK(0x1f) |
3065 DPIO_TX1_STAGGER_MULT(7) |
3066 DPIO_TX2_STAGGER_MULT(5));
3067 }
e4a1d846 3068
a8f327fb
VS
3069 /* Deassert data lane reset */
3070 chv_data_lane_soft_reset(encoder, false);
3071
a580516d 3072 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 3073
e4a1d846 3074 intel_enable_dp(encoder);
b0b33846
VS
3075
3076 /* Second common lane will stay alive on its own now */
3077 if (dport->release_cl2_override) {
3078 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3079 dport->release_cl2_override = false;
3080 }
e4a1d846
CML
3081}
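/*
 * Editor's note: tiny standalone restatement (not driver code) of the data
 * lane stagger selection above, with the thresholds spelled out: 270000,
 * 135000, 67500 and 33750 kHz are 270 MHz (HBR) and its successive halves.
 */
static int chv_stagger_for_clock(int port_clock_khz)
{
	if (port_clock_khz > 270000)		/* above HBR */
		return 0x18;
	else if (port_clock_khz > 135000)	/* above HBR/2 */
		return 0xd;
	else if (port_clock_khz > 67500)	/* above HBR/4 */
		return 0x7;
	else if (port_clock_khz > 33750)	/* above HBR/8 */
		return 0x4;
	else
		return 0x2;
}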
3082
9197c88b
VS
3083static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3084{
3085 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3086 struct drm_device *dev = encoder->base.dev;
3087 struct drm_i915_private *dev_priv = dev->dev_private;
3088 struct intel_crtc *intel_crtc =
3089 to_intel_crtc(encoder->base.crtc);
3090 enum dpio_channel ch = vlv_dport_to_channel(dport);
3091 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
3092 unsigned int lane_mask =
3093 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
3094 u32 val;
3095
625695f8
VS
3096 intel_dp_prepare(encoder);
3097
b0b33846
VS
3098 /*
3099 * Must trick the second common lane into life.
3100 * Otherwise we can't even access the PLL.
3101 */
3102 if (ch == DPIO_CH0 && pipe == PIPE_B)
3103 dport->release_cl2_override =
3104 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3105
e0fce78f
VS
3106 chv_phy_powergate_lanes(encoder, true, lane_mask);
3107
a580516d 3108 mutex_lock(&dev_priv->sb_lock);
9197c88b 3109
a8f327fb
VS
3110 /* Assert data lane reset */
3111 chv_data_lane_soft_reset(encoder, true);
3112
b9e5ac3c
VS
3113 /* program left/right clock distribution */
3114 if (pipe != PIPE_B) {
3115 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3116 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3117 if (ch == DPIO_CH0)
3118 val |= CHV_BUFLEFTENA1_FORCE;
3119 if (ch == DPIO_CH1)
3120 val |= CHV_BUFRIGHTENA1_FORCE;
3121 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3122 } else {
3123 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3124 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3125 if (ch == DPIO_CH0)
3126 val |= CHV_BUFLEFTENA2_FORCE;
3127 if (ch == DPIO_CH1)
3128 val |= CHV_BUFRIGHTENA2_FORCE;
3129 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3130 }
3131
9197c88b
VS
3132 /* program clock channel usage */
3133 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3134 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3135 if (pipe != PIPE_B)
3136 val &= ~CHV_PCS_USEDCLKCHANNEL;
3137 else
3138 val |= CHV_PCS_USEDCLKCHANNEL;
3139 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3140
e0fce78f
VS
3141 if (intel_crtc->config->lane_count > 2) {
3142 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3143 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3144 if (pipe != PIPE_B)
3145 val &= ~CHV_PCS_USEDCLKCHANNEL;
3146 else
3147 val |= CHV_PCS_USEDCLKCHANNEL;
3148 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3149 }
9197c88b
VS
3150
3151 /*
 3152 * This is a bit weird since generally CL
3153 * matches the pipe, but here we need to
3154 * pick the CL based on the port.
3155 */
3156 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3157 if (pipe != PIPE_B)
3158 val &= ~CHV_CMN_USEDCLKCHANNEL;
3159 else
3160 val |= CHV_CMN_USEDCLKCHANNEL;
3161 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3162
a580516d 3163 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3164}
3165
d6db995f
VS
3166static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3167{
3168 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3169 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3170 u32 val;
3171
3172 mutex_lock(&dev_priv->sb_lock);
3173
3174 /* disable left/right clock distribution */
3175 if (pipe != PIPE_B) {
3176 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3177 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3178 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3179 } else {
3180 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3181 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3182 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3183 }
3184
3185 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3186
b0b33846
VS
3187 /*
3188 * Leave the power down bit cleared for at least one
3189 * lane so that chv_powergate_phy_ch() will power
3190 * on something when the channel is otherwise unused.
3191 * When the port is off and the override is removed
3192 * the lanes power down anyway, so otherwise it doesn't
3193 * really matter what the state of power down bits is
3194 * after this.
3195 */
e0fce78f 3196 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3197}
3198
a4fc5ed6 3199/*
df0c237d
JB
3200 * Native read with retry for link status and receiver capability reads for
3201 * cases where the sink may still be asleep.
9d1a1031
JN
3202 *
3203 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3204 * supposed to retry 3 times per the spec.
a4fc5ed6 3205 */
9d1a1031
JN
3206static ssize_t
3207intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3208 void *buffer, size_t size)
a4fc5ed6 3209{
9d1a1031
JN
3210 ssize_t ret;
3211 int i;
61da5fab 3212
f6a19066
VS
3213 /*
 3214 * Sometimes we just get the same incorrect byte repeated
 3215 * over the entire buffer. Doing just one throw-away read
3216 * initially seems to "solve" it.
3217 */
3218 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3219
61da5fab 3220 for (i = 0; i < 3; i++) {
9d1a1031
JN
3221 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3222 if (ret == size)
3223 return ret;
61da5fab
JB
3224 msleep(1);
3225 }
a4fc5ed6 3226
9d1a1031 3227 return ret;
a4fc5ed6
KP
3228}
3229
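/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * caller of the wake/retry helper above. The helper and the DP_DPCD_REV
 * offset come from this file; the wrapper function itself is hypothetical
 * and only shows the "read succeeded iff the full size came back" convention
 * used throughout the driver.
 */
static bool example_read_dpcd_rev(struct intel_dp *intel_dp, uint8_t *rev)
{
	/* One byte at DP_DPCD_REV (0x000); retries while the sink wakes up. */
	return intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV, rev, 1) == 1;
}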
3230/*
3231 * Fetch AUX CH registers 0x202 - 0x207 which contain
3232 * link status information
3233 */
94223d04 3234bool
93f62dad 3235intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3236{
9d1a1031
JN
3237 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3238 DP_LANE0_1_STATUS,
3239 link_status,
3240 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3241}
3242
1100244e 3243/* These are source-specific values. */
94223d04 3244uint8_t
1a2eb460 3245intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3246{
30add22d 3247 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3248 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3249 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3250
9314726b
VK
3251 if (IS_BROXTON(dev))
3252 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3253 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3254 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3255 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3256 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
666a4537 3257 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
bd60018a 3258 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3259 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3260 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3261 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3262 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3263 else
bd60018a 3264 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3265}
3266
94223d04 3267uint8_t
1a2eb460
KP
3268intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3269{
30add22d 3270 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3271 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3272
5a9d1f1a
DL
3273 if (INTEL_INFO(dev)->gen >= 9) {
3274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3278 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3282 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3283 default:
3284 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3285 }
3286 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3287 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3289 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3293 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3295 default:
bd60018a 3296 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3297 }
666a4537 3298 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e2fa6fba 3299 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3301 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3305 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3307 default:
bd60018a 3308 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3309 }
bc7d38a4 3310 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3311 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3313 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3316 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3317 default:
bd60018a 3318 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3319 }
3320 } else {
3321 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3323 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3325 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3327 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3329 default:
bd60018a 3330 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3331 }
a4fc5ed6
KP
3332 }
3333}
3334
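/*
 * Editor's illustrative sketch, not part of the original file: how the two
 * source limits above are typically combined. A requested swing/pre-emphasis
 * pair is clamped to what the source supports before being placed into a
 * train_set byte. The function name is hypothetical; the real adjustment is
 * done in the link training code.
 */
static uint8_t example_clamp_train_set(struct intel_dp *intel_dp,
				       uint8_t vswing, uint8_t preemph)
{
	uint8_t max_vswing = intel_dp_voltage_max(intel_dp);
	uint8_t max_preemph;

	if (vswing > max_vswing)
		vswing = max_vswing;

	/* The pre-emphasis ceiling depends on the chosen voltage swing. */
	max_preemph = intel_dp_pre_emphasis_max(intel_dp, vswing);
	if (preemph > max_preemph)
		preemph = max_preemph;

	return vswing | preemph;
}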
5829975c 3335static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3336{
3337 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3338 struct drm_i915_private *dev_priv = dev->dev_private;
3339 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3340 struct intel_crtc *intel_crtc =
3341 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3342 unsigned long demph_reg_value, preemph_reg_value,
3343 uniqtranscale_reg_value;
3344 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3345 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3346 int pipe = intel_crtc->pipe;
e2fa6fba
P
3347
3348 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3349 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3350 preemph_reg_value = 0x0004000;
3351 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3353 demph_reg_value = 0x2B405555;
3354 uniqtranscale_reg_value = 0x552AB83A;
3355 break;
bd60018a 3356 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3357 demph_reg_value = 0x2B404040;
3358 uniqtranscale_reg_value = 0x5548B83A;
3359 break;
bd60018a 3360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3361 demph_reg_value = 0x2B245555;
3362 uniqtranscale_reg_value = 0x5560B83A;
3363 break;
bd60018a 3364 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3365 demph_reg_value = 0x2B405555;
3366 uniqtranscale_reg_value = 0x5598DA3A;
3367 break;
3368 default:
3369 return 0;
3370 }
3371 break;
bd60018a 3372 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3373 preemph_reg_value = 0x0002000;
3374 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3376 demph_reg_value = 0x2B404040;
3377 uniqtranscale_reg_value = 0x5552B83A;
3378 break;
bd60018a 3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3380 demph_reg_value = 0x2B404848;
3381 uniqtranscale_reg_value = 0x5580B83A;
3382 break;
bd60018a 3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3384 demph_reg_value = 0x2B404040;
3385 uniqtranscale_reg_value = 0x55ADDA3A;
3386 break;
3387 default:
3388 return 0;
3389 }
3390 break;
bd60018a 3391 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3392 preemph_reg_value = 0x0000000;
3393 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3395 demph_reg_value = 0x2B305555;
3396 uniqtranscale_reg_value = 0x5570B83A;
3397 break;
bd60018a 3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3399 demph_reg_value = 0x2B2B4040;
3400 uniqtranscale_reg_value = 0x55ADDA3A;
3401 break;
3402 default:
3403 return 0;
3404 }
3405 break;
bd60018a 3406 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3407 preemph_reg_value = 0x0006000;
3408 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3409 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3410 demph_reg_value = 0x1B405555;
3411 uniqtranscale_reg_value = 0x55ADDA3A;
3412 break;
3413 default:
3414 return 0;
3415 }
3416 break;
3417 default:
3418 return 0;
3419 }
3420
a580516d 3421 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3422 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3423 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3424 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3425 uniqtranscale_reg_value);
ab3c759a
CML
3426 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3427 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3428 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3429 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3430 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3431
3432 return 0;
3433}
3434
67fa24b4
VS
3435static bool chv_need_uniq_trans_scale(uint8_t train_set)
3436{
3437 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3438 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3439}
3440
5829975c 3441static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3442{
3443 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3444 struct drm_i915_private *dev_priv = dev->dev_private;
3445 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3446 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3447 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3448 uint8_t train_set = intel_dp->train_set[0];
3449 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3450 enum pipe pipe = intel_crtc->pipe;
3451 int i;
e4a1d846
CML
3452
3453 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3454 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3455 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3457 deemph_reg_value = 128;
3458 margin_reg_value = 52;
3459 break;
bd60018a 3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3461 deemph_reg_value = 128;
3462 margin_reg_value = 77;
3463 break;
bd60018a 3464 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3465 deemph_reg_value = 128;
3466 margin_reg_value = 102;
3467 break;
bd60018a 3468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3469 deemph_reg_value = 128;
3470 margin_reg_value = 154;
3471 /* FIXME extra to set for 1200 */
3472 break;
3473 default:
3474 return 0;
3475 }
3476 break;
bd60018a 3477 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3478 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3479 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3480 deemph_reg_value = 85;
3481 margin_reg_value = 78;
3482 break;
bd60018a 3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3484 deemph_reg_value = 85;
3485 margin_reg_value = 116;
3486 break;
bd60018a 3487 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3488 deemph_reg_value = 85;
3489 margin_reg_value = 154;
3490 break;
3491 default:
3492 return 0;
3493 }
3494 break;
bd60018a 3495 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3496 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3497 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3498 deemph_reg_value = 64;
3499 margin_reg_value = 104;
3500 break;
bd60018a 3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3502 deemph_reg_value = 64;
3503 margin_reg_value = 154;
3504 break;
3505 default:
3506 return 0;
3507 }
3508 break;
bd60018a 3509 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3510 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3512 deemph_reg_value = 43;
3513 margin_reg_value = 154;
3514 break;
3515 default:
3516 return 0;
3517 }
3518 break;
3519 default:
3520 return 0;
3521 }
3522
a580516d 3523 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3524
3525 /* Clear calc init */
1966e59e
VS
3526 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3527 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3528 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3529 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3530 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3531
e0fce78f
VS
3532 if (intel_crtc->config->lane_count > 2) {
3533 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3534 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3535 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3536 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3537 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3538 }
e4a1d846 3539
a02ef3c7
VS
3540 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3541 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3542 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3543 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3544
e0fce78f
VS
3545 if (intel_crtc->config->lane_count > 2) {
3546 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3547 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3548 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3549 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3550 }
a02ef3c7 3551
e4a1d846 3552 /* Program swing deemph */
e0fce78f 3553 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3554 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3555 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3556 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3557 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3558 }
e4a1d846
CML
3559
3560 /* Program swing margin */
e0fce78f 3561 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3562 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3563
1fb44505
VS
3564 val &= ~DPIO_SWING_MARGIN000_MASK;
3565 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3566
3567 /*
3568 * Supposedly this value shouldn't matter when unique transition
3569 * scale is disabled, but in fact it does matter. Let's just
3570 * always program the same value and hope it's OK.
3571 */
3572 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3573 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3574
f72df8db
VS
3575 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3576 }
e4a1d846 3577
67fa24b4
VS
3578 /*
3579 * The document said it needs to set bit 27 for ch0 and bit 26
3580 * for ch1. Might be a typo in the doc.
3581 * For now, for this unique transition scale selection, set bit
3582 * 27 for ch0 and ch1.
3583 */
e0fce78f 3584 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3585 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3586 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3587 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3588 else
3589 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3590 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3591 }
3592
3593 /* Start swing calculation */
1966e59e
VS
3594 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3595 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3596 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3597
e0fce78f
VS
3598 if (intel_crtc->config->lane_count > 2) {
3599 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3600 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3601 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3602 }
e4a1d846 3603
a580516d 3604 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3605
3606 return 0;
3607}
3608
a4fc5ed6 3609static uint32_t
5829975c 3610gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3611{
3cf2efb1 3612 uint32_t signal_levels = 0;
a4fc5ed6 3613
3cf2efb1 3614 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3616 default:
3617 signal_levels |= DP_VOLTAGE_0_4;
3618 break;
bd60018a 3619 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3620 signal_levels |= DP_VOLTAGE_0_6;
3621 break;
bd60018a 3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3623 signal_levels |= DP_VOLTAGE_0_8;
3624 break;
bd60018a 3625 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3626 signal_levels |= DP_VOLTAGE_1_2;
3627 break;
3628 }
3cf2efb1 3629 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3630 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3631 default:
3632 signal_levels |= DP_PRE_EMPHASIS_0;
3633 break;
bd60018a 3634 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3635 signal_levels |= DP_PRE_EMPHASIS_3_5;
3636 break;
bd60018a 3637 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3638 signal_levels |= DP_PRE_EMPHASIS_6;
3639 break;
bd60018a 3640 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3641 signal_levels |= DP_PRE_EMPHASIS_9_5;
3642 break;
3643 }
3644 return signal_levels;
3645}
3646
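/*
 * Editor's note (worked example, not part of the original file): with
 * train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2 the
 * helper above returns DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6, i.e. the 0.6 V
 * swing / 6 dB pre-emphasis encoding expected by the gen4 DP port register.
 */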
e3421a18
ZW
3647/* Gen6's DP voltage swing and pre-emphasis control */
3648static uint32_t
5829975c 3649gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3650{
3c5a62b5
YL
3651 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3652 DP_TRAIN_PRE_EMPHASIS_MASK);
3653 switch (signal_levels) {
bd60018a
SJ
3654 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3656 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3657 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3658 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3659 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3661 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3662 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3663 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3664 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3665 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3666 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3667 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3668 default:
3c5a62b5
YL
3669 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3670 "0x%x\n", signal_levels);
3671 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3672 }
3673}
3674
1a2eb460
KP
3675/* Gen7's DP voltage swing and pre-emphasis control */
3676static uint32_t
5829975c 3677gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3678{
3679 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3680 DP_TRAIN_PRE_EMPHASIS_MASK);
3681 switch (signal_levels) {
bd60018a 3682 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3683 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3685 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3686 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3687 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3688
bd60018a 3689 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3690 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3691 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3692 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3693
bd60018a 3694 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3695 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3696 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3697 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3698
3699 default:
3700 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3701 "0x%x\n", signal_levels);
3702 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3703 }
3704}
3705
94223d04 3706void
f4eb692e 3707intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3708{
3709 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3710 enum port port = intel_dig_port->port;
f0a3424e 3711 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3712 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3713 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3714 uint8_t train_set = intel_dp->train_set[0];
3715
f8896f5d
DW
3716 if (HAS_DDI(dev)) {
3717 signal_levels = ddi_signal_levels(intel_dp);
3718
3719 if (IS_BROXTON(dev))
3720 signal_levels = 0;
3721 else
3722 mask = DDI_BUF_EMP_MASK;
e4a1d846 3723 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3724 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3725 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3726 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3727 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3728 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3729 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3730 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3731 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3732 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3733 } else {
5829975c 3734 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3735 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3736 }
3737
96fb9f9b
VK
3738 if (mask)
3739 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3740
3741 DRM_DEBUG_KMS("Using vswing level %d\n",
3742 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3743 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3744 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3745 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3746
f4eb692e 3747 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3748
3749 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3750 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3751}
3752
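/*
 * Editor's illustrative sketch, not part of the original file: the
 * read-modify-write step performed at the end of the function above, shown
 * in isolation. Only the bits covered by 'mask' are replaced; everything
 * else in the cached DP register value is preserved. The helper name is
 * hypothetical.
 */
static uint32_t example_merge_signal_levels(uint32_t dp_reg, uint32_t mask,
					    uint32_t signal_levels)
{
	return (dp_reg & ~mask) | signal_levels;
}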
94223d04 3753void
e9c176d5
ACO
3754intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3755 uint8_t dp_train_pat)
a4fc5ed6 3756{
174edf1f 3757 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3758 struct drm_i915_private *dev_priv =
3759 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3760
f4eb692e 3761 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3762
f4eb692e 3763 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3764 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3765}
3766
94223d04 3767void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3768{
3769 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3770 struct drm_device *dev = intel_dig_port->base.base.dev;
3771 struct drm_i915_private *dev_priv = dev->dev_private;
3772 enum port port = intel_dig_port->port;
3773 uint32_t val;
3774
3775 if (!HAS_DDI(dev))
3776 return;
3777
3778 val = I915_READ(DP_TP_CTL(port));
3779 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3780 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3781 I915_WRITE(DP_TP_CTL(port), val);
3782
3783 /*
3784	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3785	 * we need to set idle transmission mode is to work around a HW issue
3786	 * where we enable the pipe while not in idle link-training mode.
3787	 * In this case there is a requirement to wait for a minimum number of
3788 * idle patterns to be sent.
3789 */
3790 if (port == PORT_A)
3791 return;
3792
3793 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3794 1))
3795 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3796}
3797
a4fc5ed6 3798static void
ea5b213a 3799intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3800{
da63a9f2 3801 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3802 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3803 enum port port = intel_dig_port->port;
da63a9f2 3804 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3805 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3806 uint32_t DP = intel_dp->DP;
a4fc5ed6 3807
bc76e320 3808 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3809 return;
3810
0c33d8d7 3811 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3812 return;
3813
28c97730 3814 DRM_DEBUG_KMS("\n");
32f9d658 3815
39e5fa88
VS
3816 if ((IS_GEN7(dev) && port == PORT_A) ||
3817 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3818 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3819 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3820 } else {
aad3d14d
VS
3821 if (IS_CHERRYVIEW(dev))
3822 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3823 else
3824 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3825 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3826 }
1612c8bd 3827 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3828 POSTING_READ(intel_dp->output_reg);
5eb08b69 3829
1612c8bd
VS
3830 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3831 I915_WRITE(intel_dp->output_reg, DP);
3832 POSTING_READ(intel_dp->output_reg);
3833
3834 /*
3835 * HW workaround for IBX, we need to move the port
3836 * to transcoder A after disabling it to allow the
3837 * matching HDMI port to be enabled on transcoder A.
3838 */
3839 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3840 /*
3841 * We get CPU/PCH FIFO underruns on the other pipe when
3842 * doing the workaround. Sweep them under the rug.
3843 */
3844 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3845 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3846
1612c8bd
VS
3847 /* always enable with pattern 1 (as per spec) */
3848 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3849 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3850 I915_WRITE(intel_dp->output_reg, DP);
3851 POSTING_READ(intel_dp->output_reg);
3852
3853 DP &= ~DP_PORT_EN;
5bddd17f 3854 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3855 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3856
3857 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3858 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3859 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3860 }
3861
f01eca2e 3862 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3863
3864 intel_dp->DP = DP;
a4fc5ed6
KP
3865}
3866
26d61aad
KP
3867static bool
3868intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3869{
a031d709
RV
3870 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3871 struct drm_device *dev = dig_port->base.base.dev;
3872 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3873 uint8_t rev;
a031d709 3874
9d1a1031
JN
3875 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3876 sizeof(intel_dp->dpcd)) < 0)
edb39244 3877 return false; /* aux transfer failed */
92fd8fd1 3878
a8e98153 3879 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3880
edb39244
AJ
3881 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3882 return false; /* DPCD not present */
3883
2293bb5c
SK
3884 /* Check if the panel supports PSR */
3885 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3886 if (is_edp(intel_dp)) {
9d1a1031
JN
3887 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3888 intel_dp->psr_dpcd,
3889 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3890 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3891 dev_priv->psr.sink_support = true;
50003939 3892 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3893 }
474d1ec4
SJ
3894
3895 if (INTEL_INFO(dev)->gen >= 9 &&
3896 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3897 uint8_t frame_sync_cap;
3898
3899 dev_priv->psr.sink_support = true;
3900 intel_dp_dpcd_read_wake(&intel_dp->aux,
3901 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3902 &frame_sync_cap, 1);
3903 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3904 /* PSR2 needs frame sync as well */
3905 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3906 DRM_DEBUG_KMS("PSR2 %s on sink",
3907 dev_priv->psr.psr2_support ? "supported" : "not supported");
3908 }
50003939
JN
3909 }
3910
bc5133d5 3911 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3912 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3913 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3914
fc0f8e25
SJ
3915 /* Intermediate frequency support */
3916 if (is_edp(intel_dp) &&
3917 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3918 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3919	 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3920 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3921 int i;
3922
fc0f8e25
SJ
3923 intel_dp_dpcd_read_wake(&intel_dp->aux,
3924 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3925 sink_rates,
3926 sizeof(sink_rates));
ea2d8a42 3927
94ca719e
VS
3928 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3929 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3930
3931 if (val == 0)
3932 break;
3933
af77b974
SJ
3934 /* Value read is in kHz while drm clock is saved in deca-kHz */
3935 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3936 }
94ca719e 3937 intel_dp->num_sink_rates = i;
fc0f8e25 3938 }
0336400e
VS
3939
3940 intel_dp_print_rates(intel_dp);
3941
edb39244
AJ
3942 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3943 DP_DWN_STRM_PORT_PRESENT))
3944 return true; /* native DP sink */
3945
3946 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3947 return true; /* no per-port downstream info */
3948
9d1a1031
JN
3949 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3950 intel_dp->downstream_ports,
3951 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3952 return false; /* downstream port status fetch failed */
3953
3954 return true;
92fd8fd1
KP
3955}
3956
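/*
 * Editor's note (worked example, not part of the original file): the
 * DP_SUPPORTED_LINK_RATES entries parsed above are stored by the sink in
 * 200 kHz units, so a raw table value of 8100 (1.62 GHz) is converted as
 * 8100 * 200 / 10 = 162000, the representation used for link rates elsewhere
 * in the driver.
 */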
0d198328
AJ
3957static void
3958intel_dp_probe_oui(struct intel_dp *intel_dp)
3959{
3960 u8 buf[3];
3961
3962 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3963 return;
3964
9d1a1031 3965 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3966 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3967 buf[0], buf[1], buf[2]);
3968
9d1a1031 3969 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3970 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3971 buf[0], buf[1], buf[2]);
3972}
3973
0e32b39c
DA
3974static bool
3975intel_dp_probe_mst(struct intel_dp *intel_dp)
3976{
3977 u8 buf[1];
3978
3979 if (!intel_dp->can_mst)
3980 return false;
3981
3982 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3983 return false;
3984
0e32b39c
DA
3985 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3986 if (buf[0] & DP_MST_CAP) {
3987 DRM_DEBUG_KMS("Sink is MST capable\n");
3988 intel_dp->is_mst = true;
3989 } else {
3990 DRM_DEBUG_KMS("Sink is not MST capable\n");
3991 intel_dp->is_mst = false;
3992 }
3993 }
0e32b39c
DA
3994
3995 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3996 return intel_dp->is_mst;
3997}
3998
e5a1cab5 3999static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4000{
082dcc7c 4001 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 4002 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c 4003 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4004 u8 buf;
e5a1cab5 4005 int ret = 0;
c6297843
RV
4006 int count = 0;
4007 int attempts = 10;
d2e216d0 4008
082dcc7c
RV
4009 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4010 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4011 ret = -EIO;
4012 goto out;
4373f0f2
PZ
4013 }
4014
082dcc7c 4015 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4016 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4017 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4018 ret = -EIO;
4019 goto out;
4020 }
d2e216d0 4021
c6297843
RV
4022 do {
4023 intel_wait_for_vblank(dev, intel_crtc->pipe);
4024
4025 if (drm_dp_dpcd_readb(&intel_dp->aux,
4026 DP_TEST_SINK_MISC, &buf) < 0) {
4027 ret = -EIO;
4028 goto out;
4029 }
4030 count = buf & DP_TEST_COUNT_MASK;
4031 } while (--attempts && count);
4032
4033 if (attempts == 0) {
dc5a9037 4034 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
c6297843
RV
4035 ret = -ETIMEDOUT;
4036 }
4037
e5a1cab5 4038 out:
082dcc7c 4039 hsw_enable_ips(intel_crtc);
e5a1cab5 4040 return ret;
082dcc7c
RV
4041}
4042
4043static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4044{
4045 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 4046 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c
RV
4047 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4048 u8 buf;
e5a1cab5
RV
4049 int ret;
4050
082dcc7c
RV
4051 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4052 return -EIO;
4053
4054 if (!(buf & DP_TEST_CRC_SUPPORTED))
4055 return -ENOTTY;
4056
4057 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4058 return -EIO;
4059
6d8175da
RV
4060 if (buf & DP_TEST_SINK_START) {
4061 ret = intel_dp_sink_crc_stop(intel_dp);
4062 if (ret)
4063 return ret;
4064 }
4065
082dcc7c 4066 hsw_disable_ips(intel_crtc);
1dda5f93 4067
9d1a1031 4068 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4069 buf | DP_TEST_SINK_START) < 0) {
4070 hsw_enable_ips(intel_crtc);
4071 return -EIO;
4373f0f2
PZ
4072 }
4073
d72f9d91 4074 intel_wait_for_vblank(dev, intel_crtc->pipe);
082dcc7c
RV
4075 return 0;
4076}
4077
4078int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4079{
4080 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4081 struct drm_device *dev = dig_port->base.base.dev;
4082 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4083 u8 buf;
621d4c76 4084 int count, ret;
082dcc7c 4085 int attempts = 6;
082dcc7c
RV
4086
4087 ret = intel_dp_sink_crc_start(intel_dp);
4088 if (ret)
4089 return ret;
4090
ad9dc91b 4091 do {
621d4c76
RV
4092 intel_wait_for_vblank(dev, intel_crtc->pipe);
4093
1dda5f93 4094 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4095 DP_TEST_SINK_MISC, &buf) < 0) {
4096 ret = -EIO;
afe0d67e 4097 goto stop;
4373f0f2 4098 }
621d4c76 4099 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4100
7e38eeff 4101 } while (--attempts && count == 0);
ad9dc91b
RV
4102
4103 if (attempts == 0) {
7e38eeff
RV
4104 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4105 ret = -ETIMEDOUT;
4106 goto stop;
4107 }
4108
4109 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4110 ret = -EIO;
4111 goto stop;
ad9dc91b 4112 }
d2e216d0 4113
afe0d67e 4114stop:
082dcc7c 4115 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4116 return ret;
d2e216d0
RV
4117}
4118
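/*
 * Editor's illustrative sketch, not part of the original file: the expected
 * calling pattern for intel_dp_sink_crc() above (e.g. from a debugfs/test
 * path). The function brackets the capture with the start/stop helpers
 * itself, so a caller only supplies a 6-byte buffer for the per-component
 * CRCs. The wrapper below is hypothetical.
 */
static void example_log_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x%02x %02x%02x %02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}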
a60f0e38
JB
4119static bool
4120intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4121{
9d1a1031
JN
4122 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4123 DP_DEVICE_SERVICE_IRQ_VECTOR,
4124 sink_irq_vector, 1) == 1;
a60f0e38
JB
4125}
4126
0e32b39c
DA
4127static bool
4128intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4129{
4130 int ret;
4131
4132 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4133 DP_SINK_COUNT_ESI,
4134 sink_irq_vector, 14);
4135 if (ret != 14)
4136 return false;
4137
4138 return true;
4139}
4140
c5d5ab7a
TP
4141static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4142{
4143 uint8_t test_result = DP_TEST_ACK;
4144 return test_result;
4145}
4146
4147static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4148{
4149 uint8_t test_result = DP_TEST_NAK;
4150 return test_result;
4151}
4152
4153static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4154{
c5d5ab7a 4155 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4156 struct intel_connector *intel_connector = intel_dp->attached_connector;
4157 struct drm_connector *connector = &intel_connector->base;
4158
4159 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4160 connector->edid_corrupt ||
559be30c
TP
4161 intel_dp->aux.i2c_defer_count > 6) {
4162 /* Check EDID read for NACKs, DEFERs and corruption
4163 * (DP CTS 1.2 Core r1.1)
4164 * 4.2.2.4 : Failed EDID read, I2C_NAK
4165 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4166 * 4.2.2.6 : EDID corruption detected
4167 * Use failsafe mode for all cases
4168 */
4169 if (intel_dp->aux.i2c_nack_count > 0 ||
4170 intel_dp->aux.i2c_defer_count > 0)
4171 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4172 intel_dp->aux.i2c_nack_count,
4173 intel_dp->aux.i2c_defer_count);
4174 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4175 } else {
f79b468e
TS
4176 struct edid *block = intel_connector->detect_edid;
4177
4178 /* We have to write the checksum
4179 * of the last block read
4180 */
4181 block += intel_connector->detect_edid->extensions;
4182
559be30c
TP
4183 if (!drm_dp_dpcd_write(&intel_dp->aux,
4184 DP_TEST_EDID_CHECKSUM,
f79b468e 4185 &block->checksum,
5a1cc655 4186 1))
559be30c
TP
4187 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4188
4189 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4190 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4191 }
4192
4193 /* Set test active flag here so userspace doesn't interrupt things */
4194 intel_dp->compliance_test_active = 1;
4195
c5d5ab7a
TP
4196 return test_result;
4197}
4198
4199static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4200{
c5d5ab7a
TP
4201 uint8_t test_result = DP_TEST_NAK;
4202 return test_result;
4203}
4204
4205static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4206{
4207 uint8_t response = DP_TEST_NAK;
4208 uint8_t rxdata = 0;
4209 int status = 0;
4210
c5d5ab7a
TP
4211 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4212 if (status <= 0) {
4213 DRM_DEBUG_KMS("Could not read test request from sink\n");
4214 goto update_status;
4215 }
4216
4217 switch (rxdata) {
4218 case DP_TEST_LINK_TRAINING:
4219 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4221 response = intel_dp_autotest_link_training(intel_dp);
4222 break;
4223 case DP_TEST_LINK_VIDEO_PATTERN:
4224 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4225 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4226 response = intel_dp_autotest_video_pattern(intel_dp);
4227 break;
4228 case DP_TEST_LINK_EDID_READ:
4229 DRM_DEBUG_KMS("EDID test requested\n");
4230 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4231 response = intel_dp_autotest_edid(intel_dp);
4232 break;
4233 case DP_TEST_LINK_PHY_TEST_PATTERN:
4234 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4235 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4236 response = intel_dp_autotest_phy_pattern(intel_dp);
4237 break;
4238 default:
4239 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4240 break;
4241 }
4242
4243update_status:
4244 status = drm_dp_dpcd_write(&intel_dp->aux,
4245 DP_TEST_RESPONSE,
4246 &response, 1);
4247 if (status <= 0)
4248 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4249}
4250
0e32b39c
DA
4251static int
4252intel_dp_check_mst_status(struct intel_dp *intel_dp)
4253{
4254 bool bret;
4255
4256 if (intel_dp->is_mst) {
4257 u8 esi[16] = { 0 };
4258 int ret = 0;
4259 int retry;
4260 bool handled;
4261 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4262go_again:
4263 if (bret == true) {
4264
4265 /* check link status - esi[10] = 0x200c */
90a6b7b0 4266 if (intel_dp->active_mst_links &&
901c2daf 4267 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4268 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4269 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4270 intel_dp_stop_link_train(intel_dp);
4271 }
4272
6f34cc39 4273 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4274 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4275
4276 if (handled) {
4277 for (retry = 0; retry < 3; retry++) {
4278 int wret;
4279 wret = drm_dp_dpcd_write(&intel_dp->aux,
4280 DP_SINK_COUNT_ESI+1,
4281 &esi[1], 3);
4282 if (wret == 3) {
4283 break;
4284 }
4285 }
4286
4287 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4288 if (bret == true) {
6f34cc39 4289 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4290 goto go_again;
4291 }
4292 } else
4293 ret = 0;
4294
4295 return ret;
4296 } else {
4297 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4298 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4299 intel_dp->is_mst = false;
4300 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4301 /* send a hotplug event */
4302 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4303 }
4304 }
4305 return -EINVAL;
4306}
4307
a4fc5ed6
KP
4308/*
4309 * According to DP spec
4310 * 5.1.2:
4311 * 1. Read DPCD
4312 * 2. Configure link according to Receiver Capabilities
4313 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4314 * 4. Check link status on receipt of hot-plug interrupt
4315 */
a5146200 4316static void
ea5b213a 4317intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4318{
5b215bcf 4319 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4320 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4321 u8 sink_irq_vector;
93f62dad 4322 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4323
5b215bcf
DA
4324 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4325
4df6960e
SS
4326 /*
4327 * Clearing compliance test variables to allow capturing
4328 * of values for next automated test request.
4329 */
4330 intel_dp->compliance_test_active = 0;
4331 intel_dp->compliance_test_type = 0;
4332 intel_dp->compliance_test_data = 0;
4333
e02f9a06 4334 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4335 return;
4336
1a125d8a
ID
4337 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4338 return;
4339
92fd8fd1 4340 /* Try to read receiver status if the link appears to be up */
93f62dad 4341 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4342 return;
4343 }
4344
92fd8fd1 4345 /* Now read the DPCD to see if it's actually running */
26d61aad 4346 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4347 return;
4348 }
4349
a60f0e38
JB
4350 /* Try to read the source of the interrupt */
4351 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4352 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4353 /* Clear interrupt source */
9d1a1031
JN
4354 drm_dp_dpcd_writeb(&intel_dp->aux,
4355 DP_DEVICE_SERVICE_IRQ_VECTOR,
4356 sink_irq_vector);
a60f0e38
JB
4357
4358 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4359 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4360 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4361 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4362 }
4363
14631e9d
SS
4364 /* if link training is requested we should perform it always */
4365 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4366 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
92fd8fd1 4367 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4368 intel_encoder->base.name);
33a34e4e 4369 intel_dp_start_link_train(intel_dp);
3ab9c637 4370 intel_dp_stop_link_train(intel_dp);
33a34e4e 4371 }
a4fc5ed6 4372}
a4fc5ed6 4373
caf9ab24 4374/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4375static enum drm_connector_status
26d61aad 4376intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4377{
caf9ab24 4378 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4379 uint8_t type;
4380
4381 if (!intel_dp_get_dpcd(intel_dp))
4382 return connector_status_disconnected;
4383
4384 /* if there's no downstream port, we're done */
4385 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4386 return connector_status_connected;
caf9ab24
AJ
4387
4388 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4389 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4390 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4391 uint8_t reg;
9d1a1031
JN
4392
4393 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4394 &reg, 1) < 0)
caf9ab24 4395 return connector_status_unknown;
9d1a1031 4396
23235177
AJ
4397 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4398 : connector_status_disconnected;
caf9ab24
AJ
4399 }
4400
4401 /* If no HPD, poke DDC gently */
0b99836f 4402 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4403 return connector_status_connected;
caf9ab24
AJ
4404
4405 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4406 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4407 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4408 if (type == DP_DS_PORT_TYPE_VGA ||
4409 type == DP_DS_PORT_TYPE_NON_EDID)
4410 return connector_status_unknown;
4411 } else {
4412 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4413 DP_DWN_STRM_PORT_TYPE_MASK;
4414 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4415 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4416 return connector_status_unknown;
4417 }
caf9ab24
AJ
4418
4419 /* Anything else is out of spec, warn and ignore */
4420 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4421 return connector_status_disconnected;
71ba9000
AJ
4422}
4423
d410b56d
CW
4424static enum drm_connector_status
4425edp_detect(struct intel_dp *intel_dp)
4426{
4427 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4428 enum drm_connector_status status;
4429
4430 status = intel_panel_detect(dev);
4431 if (status == connector_status_unknown)
4432 status = connector_status_connected;
4433
4434 return status;
4435}
4436
b93433cc
JN
4437static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4438 struct intel_digital_port *port)
5eb08b69 4439{
b93433cc 4440 u32 bit;
01cb9ea6 4441
0df53b77
JN
4442 switch (port->port) {
4443 case PORT_A:
4444 return true;
4445 case PORT_B:
4446 bit = SDE_PORTB_HOTPLUG;
4447 break;
4448 case PORT_C:
4449 bit = SDE_PORTC_HOTPLUG;
4450 break;
4451 case PORT_D:
4452 bit = SDE_PORTD_HOTPLUG;
4453 break;
4454 default:
4455 MISSING_CASE(port->port);
4456 return false;
4457 }
4458
4459 return I915_READ(SDEISR) & bit;
4460}
4461
4462static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4463 struct intel_digital_port *port)
4464{
4465 u32 bit;
4466
4467 switch (port->port) {
4468 case PORT_A:
4469 return true;
4470 case PORT_B:
4471 bit = SDE_PORTB_HOTPLUG_CPT;
4472 break;
4473 case PORT_C:
4474 bit = SDE_PORTC_HOTPLUG_CPT;
4475 break;
4476 case PORT_D:
4477 bit = SDE_PORTD_HOTPLUG_CPT;
4478 break;
a78695d3
JN
4479 case PORT_E:
4480 bit = SDE_PORTE_HOTPLUG_SPT;
4481 break;
0df53b77
JN
4482 default:
4483 MISSING_CASE(port->port);
4484 return false;
b93433cc 4485 }
1b469639 4486
b93433cc 4487 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4488}
4489
7e66bcf2 4490static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4491 struct intel_digital_port *port)
a4fc5ed6 4492{
9642c81c 4493 u32 bit;
5eb08b69 4494
9642c81c
JN
4495 switch (port->port) {
4496 case PORT_B:
4497 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4498 break;
4499 case PORT_C:
4500 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4501 break;
4502 case PORT_D:
4503 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4504 break;
4505 default:
4506 MISSING_CASE(port->port);
4507 return false;
4508 }
4509
4510 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4511}
4512
0780cd36
VS
4513static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4514 struct intel_digital_port *port)
9642c81c
JN
4515{
4516 u32 bit;
4517
4518 switch (port->port) {
4519 case PORT_B:
0780cd36 4520 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4521 break;
4522 case PORT_C:
0780cd36 4523 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4524 break;
4525 case PORT_D:
0780cd36 4526 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4527 break;
4528 default:
4529 MISSING_CASE(port->port);
4530 return false;
a4fc5ed6
KP
4531 }
4532
1d245987 4533 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4534}
4535
e464bfde 4536static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4537 struct intel_digital_port *intel_dig_port)
e464bfde 4538{
e2ec35a5
SJ
4539 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4540 enum port port;
e464bfde
JN
4541 u32 bit;
4542
e2ec35a5
SJ
4543 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4544 switch (port) {
e464bfde
JN
4545 case PORT_A:
4546 bit = BXT_DE_PORT_HP_DDIA;
4547 break;
4548 case PORT_B:
4549 bit = BXT_DE_PORT_HP_DDIB;
4550 break;
4551 case PORT_C:
4552 bit = BXT_DE_PORT_HP_DDIC;
4553 break;
4554 default:
e2ec35a5 4555 MISSING_CASE(port);
e464bfde
JN
4556 return false;
4557 }
4558
4559 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4560}
4561
7e66bcf2
JN
4562/*
4563 * intel_digital_port_connected - is the specified port connected?
4564 * @dev_priv: i915 private structure
4565 * @port: the port to test
4566 *
4567 * Return %true if @port is connected, %false otherwise.
4568 */
237ed86c 4569bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4570 struct intel_digital_port *port)
4571{
0df53b77 4572 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4573 return ibx_digital_port_connected(dev_priv, port);
22824fac 4574 else if (HAS_PCH_SPLIT(dev_priv))
0df53b77 4575 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4576 else if (IS_BROXTON(dev_priv))
4577 return bxt_digital_port_connected(dev_priv, port);
0780cd36
VS
4578 else if (IS_GM45(dev_priv))
4579 return gm45_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4580 else
4581 return g4x_digital_port_connected(dev_priv, port);
4582}
4583
8c241fef 4584static struct edid *
beb60608 4585intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4586{
beb60608 4587 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4588
9cd300e0
JN
4589 /* use cached edid if we have one */
4590 if (intel_connector->edid) {
9cd300e0
JN
4591 /* invalid edid */
4592 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4593 return NULL;
4594
55e9edeb 4595 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4596 } else
4597 return drm_get_edid(&intel_connector->base,
4598 &intel_dp->aux.ddc);
4599}
8c241fef 4600
beb60608
CW
4601static void
4602intel_dp_set_edid(struct intel_dp *intel_dp)
4603{
4604 struct intel_connector *intel_connector = intel_dp->attached_connector;
4605 struct edid *edid;
8c241fef 4606
beb60608
CW
4607 edid = intel_dp_get_edid(intel_dp);
4608 intel_connector->detect_edid = edid;
4609
4610 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4611 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4612 else
4613 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4614}
4615
beb60608
CW
4616static void
4617intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4618{
beb60608 4619 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4620
beb60608
CW
4621 kfree(intel_connector->detect_edid);
4622 intel_connector->detect_edid = NULL;
9cd300e0 4623
beb60608
CW
4624 intel_dp->has_audio = false;
4625}
d6f24d0f 4626
a9756bb5
ZW
4627static enum drm_connector_status
4628intel_dp_detect(struct drm_connector *connector, bool force)
4629{
4630 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4631 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4632 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4633 struct drm_device *dev = connector->dev;
a9756bb5 4634 enum drm_connector_status status;
671dedd2 4635 enum intel_display_power_domain power_domain;
0e32b39c 4636 bool ret;
09b1eb13 4637 u8 sink_irq_vector;
a9756bb5 4638
164c8598 4639 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4640 connector->base.id, connector->name);
beb60608 4641 intel_dp_unset_edid(intel_dp);
164c8598 4642
0e32b39c
DA
4643 if (intel_dp->is_mst) {
4644 /* MST devices are disconnected from a monitor POV */
4645 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4646 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4647 return connector_status_disconnected;
0e32b39c
DA
4648 }
4649
25f78f58
VS
4650 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4651 intel_display_power_get(to_i915(dev), power_domain);
a9756bb5 4652
d410b56d
CW
4653 /* Can't disconnect eDP, but you can close the lid... */
4654 if (is_edp(intel_dp))
4655 status = edp_detect(intel_dp);
c555a81d
ACO
4656 else if (intel_digital_port_connected(to_i915(dev),
4657 dp_to_dig_port(intel_dp)))
4658 status = intel_dp_detect_dpcd(intel_dp);
a9756bb5 4659 else
c555a81d
ACO
4660 status = connector_status_disconnected;
4661
4df6960e
SS
4662 if (status != connector_status_connected) {
4663 intel_dp->compliance_test_active = 0;
4664 intel_dp->compliance_test_type = 0;
4665 intel_dp->compliance_test_data = 0;
4666
c8c8fb33 4667 goto out;
4df6960e 4668 }
a9756bb5 4669
0d198328
AJ
4670 intel_dp_probe_oui(intel_dp);
4671
0e32b39c
DA
4672 ret = intel_dp_probe_mst(intel_dp);
4673 if (ret) {
4674 /* if we are in MST mode then this connector
4675 won't appear connected or have anything with EDID on it */
4676 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4677 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4678 status = connector_status_disconnected;
4679 goto out;
4680 }
4681
4df6960e
SS
4682 /*
4683 * Clearing NACK and defer counts to get their exact values
4684 * while reading EDID which are required by Compliance tests
4685 * 4.2.2.4 and 4.2.2.5
4686 */
4687 intel_dp->aux.i2c_nack_count = 0;
4688 intel_dp->aux.i2c_defer_count = 0;
4689
beb60608 4690 intel_dp_set_edid(intel_dp);
a9756bb5 4691
d63885da
PZ
4692 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4693 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4694 status = connector_status_connected;
4695
09b1eb13
TP
4696 /* Try to read the source of the interrupt */
4697 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4698 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4699 /* Clear interrupt source */
4700 drm_dp_dpcd_writeb(&intel_dp->aux,
4701 DP_DEVICE_SERVICE_IRQ_VECTOR,
4702 sink_irq_vector);
4703
4704 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4705 intel_dp_handle_test_request(intel_dp);
4706 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4707 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4708 }
4709
c8c8fb33 4710out:
25f78f58 4711 intel_display_power_put(to_i915(dev), power_domain);
c8c8fb33 4712 return status;
a4fc5ed6
KP
4713}
4714
beb60608
CW
4715static void
4716intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4717{
df0e9248 4718 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4719 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4720 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4721 enum intel_display_power_domain power_domain;
a4fc5ed6 4722
beb60608
CW
4723 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4724 connector->base.id, connector->name);
4725 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4726
beb60608
CW
4727 if (connector->status != connector_status_connected)
4728 return;
671dedd2 4729
25f78f58
VS
4730 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4731 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4732
4733 intel_dp_set_edid(intel_dp);
4734
25f78f58 4735 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4736
4737 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4738 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4739}
4740
4741static int intel_dp_get_modes(struct drm_connector *connector)
4742{
4743 struct intel_connector *intel_connector = to_intel_connector(connector);
4744 struct edid *edid;
4745
4746 edid = intel_connector->detect_edid;
4747 if (edid) {
4748 int ret = intel_connector_update_modes(connector, edid);
4749 if (ret)
4750 return ret;
4751 }
32f9d658 4752
f8779fda 4753 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4754 if (is_edp(intel_attached_dp(connector)) &&
4755 intel_connector->panel.fixed_mode) {
f8779fda 4756 struct drm_display_mode *mode;
beb60608
CW
4757
4758 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4759 intel_connector->panel.fixed_mode);
f8779fda 4760 if (mode) {
32f9d658
ZW
4761 drm_mode_probed_add(connector, mode);
4762 return 1;
4763 }
4764 }
beb60608 4765
32f9d658 4766 return 0;
a4fc5ed6
KP
4767}
4768
1aad7ac0
CW
4769static bool
4770intel_dp_detect_audio(struct drm_connector *connector)
4771{
1aad7ac0 4772 bool has_audio = false;
beb60608 4773 struct edid *edid;
1aad7ac0 4774
beb60608
CW
4775 edid = to_intel_connector(connector)->detect_edid;
4776 if (edid)
1aad7ac0 4777 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4778
1aad7ac0
CW
4779 return has_audio;
4780}
4781
f684960e
CW
4782static int
4783intel_dp_set_property(struct drm_connector *connector,
4784 struct drm_property *property,
4785 uint64_t val)
4786{
e953fd7b 4787 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4788 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4789 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4790 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4791 int ret;
4792
662595df 4793 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4794 if (ret)
4795 return ret;
4796
3f43c48d 4797 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4798 int i = val;
4799 bool has_audio;
4800
4801 if (i == intel_dp->force_audio)
f684960e
CW
4802 return 0;
4803
1aad7ac0 4804 intel_dp->force_audio = i;
f684960e 4805
c3e5f67b 4806 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4807 has_audio = intel_dp_detect_audio(connector);
4808 else
c3e5f67b 4809 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4810
4811 if (has_audio == intel_dp->has_audio)
f684960e
CW
4812 return 0;
4813
1aad7ac0 4814 intel_dp->has_audio = has_audio;
f684960e
CW
4815 goto done;
4816 }
4817
e953fd7b 4818 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4819 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4820 bool old_range = intel_dp->limited_color_range;
ae4edb80 4821
55bc60db
VS
4822 switch (val) {
4823 case INTEL_BROADCAST_RGB_AUTO:
4824 intel_dp->color_range_auto = true;
4825 break;
4826 case INTEL_BROADCAST_RGB_FULL:
4827 intel_dp->color_range_auto = false;
0f2a2a75 4828 intel_dp->limited_color_range = false;
55bc60db
VS
4829 break;
4830 case INTEL_BROADCAST_RGB_LIMITED:
4831 intel_dp->color_range_auto = false;
0f2a2a75 4832 intel_dp->limited_color_range = true;
55bc60db
VS
4833 break;
4834 default:
4835 return -EINVAL;
4836 }
ae4edb80
DV
4837
4838 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4839 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4840 return 0;
4841
e953fd7b
CW
4842 goto done;
4843 }
4844
53b41837
YN
4845 if (is_edp(intel_dp) &&
4846 property == connector->dev->mode_config.scaling_mode_property) {
4847 if (val == DRM_MODE_SCALE_NONE) {
4848 DRM_DEBUG_KMS("no scaling not supported\n");
4849 return -EINVAL;
4850 }
4851
4852 if (intel_connector->panel.fitting_mode == val) {
4853 /* the eDP scaling property is not changed */
4854 return 0;
4855 }
4856 intel_connector->panel.fitting_mode = val;
4857
4858 goto done;
4859 }
4860
f684960e
CW
4861 return -EINVAL;
4862
4863done:
c0c36b94
CW
4864 if (intel_encoder->base.crtc)
4865 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4866
4867 return 0;
4868}
4869
a4fc5ed6 4870static void
73845adf 4871intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4872{
1d508706 4873 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4874
10e972d3 4875 kfree(intel_connector->detect_edid);
beb60608 4876
9cd300e0
JN
4877 if (!IS_ERR_OR_NULL(intel_connector->edid))
4878 kfree(intel_connector->edid);
4879
acd8db10
PZ
4880 /* Can't call is_edp() since the encoder may have been destroyed
4881 * already. */
4882 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4883 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4884
a4fc5ed6 4885 drm_connector_cleanup(connector);
55f78c43 4886 kfree(connector);
a4fc5ed6
KP
4887}
4888
00c09d70 4889void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4890{
da63a9f2
PZ
4891 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4892 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4893
a121f4e5 4894 intel_dp_aux_fini(intel_dp);
0e32b39c 4895 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4896 if (is_edp(intel_dp)) {
4897 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4898 /*
4899 * vdd might still be enabled due to the delayed vdd off.
4900 * Make sure vdd is actually turned off here.
4901 */
773538e8 4902 pps_lock(intel_dp);
4be73780 4903 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4904 pps_unlock(intel_dp);
4905
01527b31
CT
4906 if (intel_dp->edp_notifier.notifier_call) {
4907 unregister_reboot_notifier(&intel_dp->edp_notifier);
4908 intel_dp->edp_notifier.notifier_call = NULL;
4909 }
bd943159 4910 }
c8bd0e49 4911 drm_encoder_cleanup(encoder);
da63a9f2 4912 kfree(intel_dig_port);
24d05927
DV
4913}
4914
07f9cd0b
ID
4915static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4916{
4917 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4918
4919 if (!is_edp(intel_dp))
4920 return;
4921
951468f3
VS
4922 /*
4923 * vdd might still be enabled due to the delayed vdd off.
4924 * Make sure vdd is actually turned off here.
4925 */
afa4e53a 4926 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4927 pps_lock(intel_dp);
07f9cd0b 4928 edp_panel_vdd_off_sync(intel_dp);
773538e8 4929 pps_unlock(intel_dp);
07f9cd0b
ID
4930}
4931
49e6bc51
VS
4932static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4933{
4934 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4935 struct drm_device *dev = intel_dig_port->base.base.dev;
4936 struct drm_i915_private *dev_priv = dev->dev_private;
4937 enum intel_display_power_domain power_domain;
4938
4939 lockdep_assert_held(&dev_priv->pps_mutex);
4940
4941 if (!edp_have_panel_vdd(intel_dp))
4942 return;
4943
4944 /*
4945 * The VDD bit needs a power domain reference, so if the bit is
4946 * already enabled when we boot or resume, grab this reference and
4947 * schedule a vdd off, so we don't hold on to the reference
4948 * indefinitely.
4949 */
4950 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
25f78f58 4951 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
49e6bc51
VS
4952 intel_display_power_get(dev_priv, power_domain);
4953
4954 edp_panel_vdd_schedule_off(intel_dp);
4955}
4956
6d93c0c4
ID
4957static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4958{
49e6bc51
VS
4959 struct intel_dp *intel_dp;
4960
4961 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4962 return;
4963
4964 intel_dp = enc_to_intel_dp(encoder);
4965
4966 pps_lock(intel_dp);
4967
4968 /*
4969 * Read out the current power sequencer assignment,
4970 * in case the BIOS did something with it.
4971 */
666a4537 4972 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
49e6bc51
VS
4973 vlv_initial_power_sequencer_setup(intel_dp);
4974
4975 intel_edp_panel_vdd_sanitize(intel_dp);
4976
4977 pps_unlock(intel_dp);
6d93c0c4
ID
4978}
4979
a4fc5ed6 4980static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4981 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4982 .detect = intel_dp_detect,
beb60608 4983 .force = intel_dp_force,
a4fc5ed6 4984 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4985 .set_property = intel_dp_set_property,
2545e4a6 4986 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4987 .destroy = intel_dp_connector_destroy,
c6f95f27 4988 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4989 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4990};
4991
4992static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4993 .get_modes = intel_dp_get_modes,
4994 .mode_valid = intel_dp_mode_valid,
df0e9248 4995 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4996};
4997
a4fc5ed6 4998static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4999 .reset = intel_dp_encoder_reset,
24d05927 5000 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
5001};
5002
b2c5c181 5003enum irqreturn
13cf5504
DA
5004intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5005{
5006 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 5007 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
5008 struct drm_device *dev = intel_dig_port->base.base.dev;
5009 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 5010 enum intel_display_power_domain power_domain;
b2c5c181 5011 enum irqreturn ret = IRQ_NONE;
1c767b33 5012
2540058f
TI
5013 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5014 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
0e32b39c 5015 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5016
7a7f84cc
VS
5017 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5018 /*
5019 * vdd off can generate a long pulse on eDP which
5020 * would require vdd on to handle it, and thus we
5021 * would end up in an endless cycle of
5022 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5023 */
5024 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5025 port_name(intel_dig_port->port));
a8b3d52f 5026 return IRQ_HANDLED;
7a7f84cc
VS
5027 }
5028
26fbb774
VS
5029 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5030 port_name(intel_dig_port->port),
0e32b39c 5031 long_hpd ? "long" : "short");
13cf5504 5032
25f78f58 5033 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1c767b33
ID
5034 intel_display_power_get(dev_priv, power_domain);
5035
0e32b39c 5036 if (long_hpd) {
5fa836a9
MK
5037 /* indicate that we need to restart link training */
5038 intel_dp->train_set_valid = false;
2a592bec 5039
7e66bcf2
JN
5040 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5041 goto mst_fail;
0e32b39c
DA
5042
5043 if (!intel_dp_get_dpcd(intel_dp)) {
5044 goto mst_fail;
5045 }
5046
5047 intel_dp_probe_oui(intel_dp);
5048
d14e7b6d
VS
5049 if (!intel_dp_probe_mst(intel_dp)) {
5050 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5051 intel_dp_check_link_status(intel_dp);
5052 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5053 goto mst_fail;
d14e7b6d 5054 }
0e32b39c
DA
5055 } else {
5056 if (intel_dp->is_mst) {
1c767b33 5057 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5058 goto mst_fail;
5059 }
5060
5061 if (!intel_dp->is_mst) {
5b215bcf 5062 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5063 intel_dp_check_link_status(intel_dp);
5b215bcf 5064 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5065 }
5066 }
b2c5c181
DV
5067
5068 ret = IRQ_HANDLED;
5069
1c767b33 5070 goto put_power;
0e32b39c
DA
5071mst_fail:
5072 /* if we were in MST mode and the device is not there, get out of MST mode */
5073 if (intel_dp->is_mst) {
5074 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5075 intel_dp->is_mst = false;
5076 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5077 }
1c767b33
ID
5078put_power:
5079 intel_display_power_put(dev_priv, power_domain);
5080
5081 return ret;
13cf5504
DA
5082}
5083
477ec328 5084/* check the VBT to see whether the eDP is on another port */
5d8a7752 5085bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5086{
5087 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5088 union child_device_config *p_child;
36e83a18 5089 int i;
5d8a7752 5090 static const short port_mapping[] = {
477ec328
RV
5091 [PORT_B] = DVO_PORT_DPB,
5092 [PORT_C] = DVO_PORT_DPC,
5093 [PORT_D] = DVO_PORT_DPD,
5094 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5095 };
36e83a18 5096
53ce81a7
VS
5097 /*
5098 * eDP is not supported on g4x, so bail out early just
5099 * for a bit of extra safety in case the VBT is bonkers.
5100 */
5101 if (INTEL_INFO(dev)->gen < 5)
5102 return false;
5103
3b32a35b
VS
5104 if (port == PORT_A)
5105 return true;
5106
41aa3448 5107 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5108 return false;
5109
41aa3448
RV
5110 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5111 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5112
5d8a7752 5113 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5114 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5115 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5116 return true;
5117 }
5118 return false;
5119}
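/*
 * Illustrative example (not part of the original source): for port == PORT_C,
 * the loop above matches any VBT child device whose dvo_port is DVO_PORT_DPC
 * and whose device_type carries the eDP bits, in which case the port is
 * treated as eDP rather than external DP.
 */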
5120
0e32b39c 5121void
f684960e
CW
5122intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5123{
53b41837
YN
5124 struct intel_connector *intel_connector = to_intel_connector(connector);
5125
3f43c48d 5126 intel_attach_force_audio_property(connector);
e953fd7b 5127 intel_attach_broadcast_rgb_property(connector);
55bc60db 5128 intel_dp->color_range_auto = true;
53b41837
YN
5129
5130 if (is_edp(intel_dp)) {
5131 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5132 drm_object_attach_property(
5133 &connector->base,
53b41837 5134 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5135 DRM_MODE_SCALE_ASPECT);
5136 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5137 }
f684960e
CW
5138}
5139
dada1a9f
ID
5140static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5141{
d28d4731 5142 intel_dp->panel_power_off_time = ktime_get_boottime();
dada1a9f
ID
5143 intel_dp->last_power_on = jiffies;
5144 intel_dp->last_backlight_off = jiffies;
5145}
5146
67a54566
DV
5147static void
5148intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5149 struct intel_dp *intel_dp)
67a54566
DV
5150{
5151 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5152 struct edp_power_seq cur, vbt, spec,
5153 *final = &intel_dp->pps_delays;
b0a08bec 5154 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
f0f59a00 5155 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5156
e39b999a
VS
5157 lockdep_assert_held(&dev_priv->pps_mutex);
5158
81ddbc69
VS
5159 /* already initialized? */
5160 if (final->t11_t12 != 0)
5161 return;
5162
b0a08bec
VK
5163 if (IS_BROXTON(dev)) {
5164 /*
5165 * TODO: BXT has 2 sets of PPS registers.
5166 * The correct register for Broxton needs to be identified
5167 * using the VBT; hardcoding for now.
5168 */
5169 pp_ctrl_reg = BXT_PP_CONTROL(0);
5170 pp_on_reg = BXT_PP_ON_DELAYS(0);
5171 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5172 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5173 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5174 pp_on_reg = PCH_PP_ON_DELAYS;
5175 pp_off_reg = PCH_PP_OFF_DELAYS;
5176 pp_div_reg = PCH_PP_DIVISOR;
5177 } else {
bf13e81b
JN
5178 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5179
5180 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5181 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5182 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5183 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5184 }
67a54566
DV
5185
5186 /* Workaround: Need to write PP_CONTROL with the unlock key as
5187 * the very first thing. */
b0a08bec 5188 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5189
453c5420
JB
5190 pp_on = I915_READ(pp_on_reg);
5191 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5192 if (!IS_BROXTON(dev)) {
5193 I915_WRITE(pp_ctrl_reg, pp_ctl);
5194 pp_div = I915_READ(pp_div_reg);
5195 }
67a54566
DV
5196
5197 /* Pull timing values out of registers */
5198 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5199 PANEL_POWER_UP_DELAY_SHIFT;
5200
5201 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5202 PANEL_LIGHT_ON_DELAY_SHIFT;
5203
5204 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5205 PANEL_LIGHT_OFF_DELAY_SHIFT;
5206
5207 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5208 PANEL_POWER_DOWN_DELAY_SHIFT;
5209
b0a08bec
VK
5210 if (IS_BROXTON(dev)) {
5211 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5212 BXT_POWER_CYCLE_DELAY_SHIFT;
5213 if (tmp > 0)
5214 cur.t11_t12 = (tmp - 1) * 1000;
5215 else
5216 cur.t11_t12 = 0;
5217 } else {
5218 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5219 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5220 }
67a54566
DV
5221
5222 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5223 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5224
41aa3448 5225 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5226
5227 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5228 * our hw here, which are all in 100usec. */
5229 spec.t1_t3 = 210 * 10;
5230 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5231 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5232 spec.t10 = 500 * 10;
5233 /* This one is special and actually in units of 100ms, but
5234 * zero-based in the hw (so we need to add 100 ms). But the sw vbt
5235 * table multiplies it by 1000 to make it in units of 100usec,
5236 * too. */
5237 spec.t11_t12 = (510 + 100) * 10;
5238
5239 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5240 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5241
5242 /* Use the max of the register settings and vbt. If both are
5243 * unset, fall back to the spec limits. */
36b5f425 5244#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5245 spec.field : \
5246 max(cur.field, vbt.field))
5247 assign_final(t1_t3);
5248 assign_final(t8);
5249 assign_final(t9);
5250 assign_final(t10);
5251 assign_final(t11_t12);
5252#undef assign_final
5253
36b5f425 5254#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5255 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5256 intel_dp->backlight_on_delay = get_delay(t8);
5257 intel_dp->backlight_off_delay = get_delay(t9);
5258 intel_dp->panel_power_down_delay = get_delay(t10);
5259 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5260#undef get_delay
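/*
 * Worked example (illustrative): the spec fallback t11_t12 above is
 * (510 + 100) * 10 = 6100 in the hardware's 100usec units, i.e. 610ms.
 * get_delay() rounds that up to milliseconds, DIV_ROUND_UP(6100, 10) = 610,
 * so panel_power_cycle_delay ends up as 610ms when neither the registers
 * nor the VBT provide a value.
 */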
5261
f30d26e4
JN
5262 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5263 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5264 intel_dp->panel_power_cycle_delay);
5265
5266 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5267 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5268}
5269
5270static void
5271intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5272 struct intel_dp *intel_dp)
f30d26e4
JN
5273{
5274 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5275 u32 pp_on, pp_off, pp_div, port_sel = 0;
5276 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
f0f59a00 5277 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5278 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5279 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5280
e39b999a 5281 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5282
b0a08bec
VK
5283 if (IS_BROXTON(dev)) {
5284 /*
5285 * TODO: BXT has 2 sets of PPS registers.
5286 * The correct register for Broxton needs to be identified
5287 * using the VBT; hardcoding for now.
5288 */
5289 pp_ctrl_reg = BXT_PP_CONTROL(0);
5290 pp_on_reg = BXT_PP_ON_DELAYS(0);
5291 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5292
5293 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5294 pp_on_reg = PCH_PP_ON_DELAYS;
5295 pp_off_reg = PCH_PP_OFF_DELAYS;
5296 pp_div_reg = PCH_PP_DIVISOR;
5297 } else {
bf13e81b
JN
5298 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5299
5300 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5301 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5302 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5303 }
5304
b2f19d1a
PZ
5305 /*
5306 * And finally store the new values in the power sequencer. The
5307 * backlight delays are set to 1 because we do manual waits on them. For
5308 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5309 * we'll end up waiting for the backlight off delay twice: once when we
5310 * do the manual sleep, and once when we disable the panel and wait for
5311 * the PP_STATUS bit to become zero.
5312 */
f30d26e4 5313 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5314 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5315 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5316 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5317 /* Compute the divisor for the pp clock, simply match the Bspec
5318 * formula. */
b0a08bec
VK
5319 if (IS_BROXTON(dev)) {
5320 pp_div = I915_READ(pp_ctrl_reg);
5321 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5322 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5323 << BXT_POWER_CYCLE_DELAY_SHIFT);
5324 } else {
5325 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5326 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5327 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5328 }
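/*
 * Worked example (illustrative, div value assumed): with div == 125 on a
 * PCH platform the reference divider field is ((100 * 125) / 2 - 1) = 6249,
 * and a t11_t12 of 6100 (100usec units) gives a power cycle delay field of
 * DIV_ROUND_UP(6100, 1000) = 7. On BXT the same delay goes into the
 * BXT_POWER_CYCLE_DELAY field of the PP control register instead.
 */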
67a54566
DV
5329
5330 /* Haswell doesn't have any port selection bits for the panel
5331 * power sequencer any more. */
666a4537 5332 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ad933b56 5333 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5334 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5335 if (port == PORT_A)
a24c144c 5336 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5337 else
a24c144c 5338 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5339 }
5340
453c5420
JB
5341 pp_on |= port_sel;
5342
5343 I915_WRITE(pp_on_reg, pp_on);
5344 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5345 if (IS_BROXTON(dev))
5346 I915_WRITE(pp_ctrl_reg, pp_div);
5347 else
5348 I915_WRITE(pp_div_reg, pp_div);
67a54566 5349
67a54566 5350 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5351 I915_READ(pp_on_reg),
5352 I915_READ(pp_off_reg),
b0a08bec
VK
5353 IS_BROXTON(dev) ?
5354 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5355 I915_READ(pp_div_reg));
f684960e
CW
5356}
5357
b33a2815
VK
5358/**
5359 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5360 * @dev: DRM device
5361 * @refresh_rate: RR to be programmed
5362 *
5363 * This function gets called when refresh rate (RR) has to be changed from
5364 * one frequency to another. Switches can be between high and low RR
5365 * supported by the panel or to any other RR based on media playback (in
5366 * this case, RR value needs to be passed from user space).
5367 *
5368 * The caller of this function needs to take a lock on dev_priv->drrs.mutex.
5369 */
96178eeb 5370static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5371{
5372 struct drm_i915_private *dev_priv = dev->dev_private;
5373 struct intel_encoder *encoder;
96178eeb
VK
5374 struct intel_digital_port *dig_port = NULL;
5375 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5376 struct intel_crtc_state *config = NULL;
439d7ac0 5377 struct intel_crtc *intel_crtc = NULL;
96178eeb 5378 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5379
5380 if (refresh_rate <= 0) {
5381 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5382 return;
5383 }
5384
96178eeb
VK
5385 if (intel_dp == NULL) {
5386 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5387 return;
5388 }
5389
1fcc9d1c 5390 /*
e4d59f6b
RV
5391 * FIXME: This needs proper synchronization with psr state for some
5392 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5393 */
439d7ac0 5394
96178eeb
VK
5395 dig_port = dp_to_dig_port(intel_dp);
5396 encoder = &dig_port->base;
723f9aab 5397 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5398
5399 if (!intel_crtc) {
5400 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5401 return;
5402 }
5403
6e3c9717 5404 config = intel_crtc->config;
439d7ac0 5405
96178eeb 5406 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5407 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5408 return;
5409 }
5410
96178eeb
VK
5411 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5412 refresh_rate)
439d7ac0
PB
5413 index = DRRS_LOW_RR;
5414
96178eeb 5415 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5416 DRM_DEBUG_KMS(
5417 "DRRS requested for previously set RR...ignoring\n");
5418 return;
5419 }
5420
5421 if (!intel_crtc->active) {
5422 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5423 return;
5424 }
5425
44395bfe 5426 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5427 switch (index) {
5428 case DRRS_HIGH_RR:
5429 intel_dp_set_m_n(intel_crtc, M1_N1);
5430 break;
5431 case DRRS_LOW_RR:
5432 intel_dp_set_m_n(intel_crtc, M2_N2);
5433 break;
5434 case DRRS_MAX_RR:
5435 default:
5436 DRM_ERROR("Unsupported refreshrate type\n");
5437 }
5438 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5439 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5440 u32 val;
a4c30b1d 5441
649636ef 5442 val = I915_READ(reg);
439d7ac0 5443 if (index > DRRS_HIGH_RR) {
666a4537 5444 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5445 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5446 else
5447 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5448 } else {
666a4537 5449 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5450 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5451 else
5452 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5453 }
5454 I915_WRITE(reg, val);
5455 }
5456
4e9ac947
VK
5457 dev_priv->drrs.refresh_rate_type = index;
5458
5459 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5460}
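/*
 * Summary of the two switch mechanisms above: gen8+ (except CHV) reprograms
 * the link M/N values (M1_N1 for high RR, M2_N2 for low RR), while gen7 and
 * CHV toggle the PIPECONF_EDP_RR_MODE_SWITCH bit in the pipe's PIPECONF
 * register (the _VLV variant on VLV/CHV).
 */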
5461
b33a2815
VK
5462/**
5463 * intel_edp_drrs_enable - init drrs struct if supported
5464 * @intel_dp: DP struct
5465 *
5466 * Initializes frontbuffer_bits and drrs.dp
5467 */
c395578e
VK
5468void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5469{
5470 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5471 struct drm_i915_private *dev_priv = dev->dev_private;
5472 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5473 struct drm_crtc *crtc = dig_port->base.base.crtc;
5474 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5475
5476 if (!intel_crtc->config->has_drrs) {
5477 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5478 return;
5479 }
5480
5481 mutex_lock(&dev_priv->drrs.mutex);
5482 if (WARN_ON(dev_priv->drrs.dp)) {
5483 DRM_ERROR("DRRS already enabled\n");
5484 goto unlock;
5485 }
5486
5487 dev_priv->drrs.busy_frontbuffer_bits = 0;
5488
5489 dev_priv->drrs.dp = intel_dp;
5490
5491unlock:
5492 mutex_unlock(&dev_priv->drrs.mutex);
5493}
5494
b33a2815
VK
5495/**
5496 * intel_edp_drrs_disable - Disable DRRS
5497 * @intel_dp: DP struct
5498 *
5499 */
c395578e
VK
5500void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5501{
5502 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5503 struct drm_i915_private *dev_priv = dev->dev_private;
5504 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5505 struct drm_crtc *crtc = dig_port->base.base.crtc;
5506 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5507
5508 if (!intel_crtc->config->has_drrs)
5509 return;
5510
5511 mutex_lock(&dev_priv->drrs.mutex);
5512 if (!dev_priv->drrs.dp) {
5513 mutex_unlock(&dev_priv->drrs.mutex);
5514 return;
5515 }
5516
5517 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5518 intel_dp_set_drrs_state(dev_priv->dev,
5519 intel_dp->attached_connector->panel.
5520 fixed_mode->vrefresh);
5521
5522 dev_priv->drrs.dp = NULL;
5523 mutex_unlock(&dev_priv->drrs.mutex);
5524
5525 cancel_delayed_work_sync(&dev_priv->drrs.work);
5526}
5527
4e9ac947
VK
5528static void intel_edp_drrs_downclock_work(struct work_struct *work)
5529{
5530 struct drm_i915_private *dev_priv =
5531 container_of(work, typeof(*dev_priv), drrs.work.work);
5532 struct intel_dp *intel_dp;
5533
5534 mutex_lock(&dev_priv->drrs.mutex);
5535
5536 intel_dp = dev_priv->drrs.dp;
5537
5538 if (!intel_dp)
5539 goto unlock;
5540
439d7ac0 5541 /*
4e9ac947
VK
5542 * The delayed work can race with an invalidate, hence we need to
5543 * recheck.
439d7ac0
PB
5544 */
5545
4e9ac947
VK
5546 if (dev_priv->drrs.busy_frontbuffer_bits)
5547 goto unlock;
439d7ac0 5548
4e9ac947
VK
5549 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5550 intel_dp_set_drrs_state(dev_priv->dev,
5551 intel_dp->attached_connector->panel.
5552 downclock_mode->vrefresh);
439d7ac0 5553
4e9ac947 5554unlock:
4e9ac947 5555 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5556}
5557
b33a2815 5558/**
0ddfd203 5559 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5560 * @dev: DRM device
5561 * @frontbuffer_bits: frontbuffer plane tracking bits
5562 *
0ddfd203
R
5563 * This function gets called every time rendering on the given planes starts.
5564 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5565 *
5566 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5567 */
a93fad0f
VK
5568void intel_edp_drrs_invalidate(struct drm_device *dev,
5569 unsigned frontbuffer_bits)
5570{
5571 struct drm_i915_private *dev_priv = dev->dev_private;
5572 struct drm_crtc *crtc;
5573 enum pipe pipe;
5574
9da7d693 5575 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5576 return;
5577
88f933a8 5578 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5579
a93fad0f 5580 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5581 if (!dev_priv->drrs.dp) {
5582 mutex_unlock(&dev_priv->drrs.mutex);
5583 return;
5584 }
5585
a93fad0f
VK
5586 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5587 pipe = to_intel_crtc(crtc)->pipe;
5588
c1d038c6
DV
5589 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5590 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5591
0ddfd203 5592 /* invalidate means busy screen hence upclock */
c1d038c6 5593 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5594 intel_dp_set_drrs_state(dev_priv->dev,
5595 dev_priv->drrs.dp->attached_connector->panel.
5596 fixed_mode->vrefresh);
a93fad0f 5597
a93fad0f
VK
5598 mutex_unlock(&dev_priv->drrs.mutex);
5599}
5600
b33a2815 5601/**
0ddfd203 5602 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5603 * @dev: DRM device
5604 * @frontbuffer_bits: frontbuffer plane tracking bits
5605 *
0ddfd203
R
5606 * This function gets called every time rendering on the given planes has
5607 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5608 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted
5609 * if no other planes are dirty.
b33a2815
VK
5610 *
5611 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5612 */
a93fad0f
VK
5613void intel_edp_drrs_flush(struct drm_device *dev,
5614 unsigned frontbuffer_bits)
5615{
5616 struct drm_i915_private *dev_priv = dev->dev_private;
5617 struct drm_crtc *crtc;
5618 enum pipe pipe;
5619
9da7d693 5620 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5621 return;
5622
88f933a8 5623 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5624
a93fad0f 5625 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5626 if (!dev_priv->drrs.dp) {
5627 mutex_unlock(&dev_priv->drrs.mutex);
5628 return;
5629 }
5630
a93fad0f
VK
5631 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5632 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5633
5634 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5635 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5636
0ddfd203 5637 /* flush means busy screen hence upclock */
c1d038c6 5638 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5639 intel_dp_set_drrs_state(dev_priv->dev,
5640 dev_priv->drrs.dp->attached_connector->panel.
5641 fixed_mode->vrefresh);
5642
5643 /*
5644 * flush also means no more activity, hence schedule the downclock if all
5645 * other fbs are quiescent too
5646 */
5647 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5648 schedule_delayed_work(&dev_priv->drrs.work,
5649 msecs_to_jiffies(1000));
5650 mutex_unlock(&dev_priv->drrs.mutex);
5651}
5652
b33a2815
VK
5653/**
5654 * DOC: Display Refresh Rate Switching (DRRS)
5655 *
5656 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5657 * which enables switching between low and high refresh rates,
5658 * dynamically, based on the usage scenario. This feature is applicable
5659 * for internal panels.
5660 *
5661 * Indication that the panel supports DRRS is given by the panel EDID, which
5662 * would list multiple refresh rates for one resolution.
5663 *
5664 * DRRS is of 2 types - static and seamless.
5665 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5666 * (may appear as a blink on screen) and is used in dock-undock scenario.
5667 * Seamless DRRS involves changing RR without any visual effect to the user
5668 * and can be used during normal system usage. This is done by programming
5669 * certain registers.
5670 *
5671 * Support for static/seamless DRRS may be indicated in the VBT based on
5672 * inputs from the panel spec.
5673 *
5674 * DRRS saves power by switching to low RR based on usage scenarios.
5675 *
5676 * eDP DRRS:-
5677 * The implementation is based on frontbuffer tracking.
5678 * When there is a disturbance on the screen triggered by user activity or a
5679 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5680 * When there is no movement on screen, after a timeout of 1 second, a switch
5681 * to low RR is made.
5682 * For integration with frontbuffer tracking code,
5683 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5684 *
5685 * DRRS can be further extended to support other internal panels and also
5686 * the scenario of video playback wherein RR is set based on the rate
5687 * requested by userspace.
5688 */
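/*
 * Call-flow sketch derived from the functions above: frontbuffer tracking
 * calls intel_edp_drrs_invalidate() when rendering on a tracked plane starts
 * (switch back to the fixed/high RR) and intel_edp_drrs_flush() when it
 * completes (also upclocking); the flush path then schedules
 * intel_edp_drrs_downclock_work() with a one second delay, which drops to
 * the downclock mode only if no frontbuffer bits are still busy.
 */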
5689
5690/**
5691 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5692 * @intel_connector: eDP connector
5693 * @fixed_mode: preferred mode of panel
5694 *
5695 * This function is called only once at driver load to initialize basic
5696 * DRRS stuff.
5697 *
5698 * Returns:
5699 * Downclock mode if panel supports it, else return NULL.
5700 * DRRS support is determined by the presence of downclock mode (apart
5701 * from VBT setting).
5702 */
4f9db5b5 5703static struct drm_display_mode *
96178eeb
VK
5704intel_dp_drrs_init(struct intel_connector *intel_connector,
5705 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5706{
5707 struct drm_connector *connector = &intel_connector->base;
96178eeb 5708 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5709 struct drm_i915_private *dev_priv = dev->dev_private;
5710 struct drm_display_mode *downclock_mode = NULL;
5711
9da7d693
DV
5712 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5713 mutex_init(&dev_priv->drrs.mutex);
5714
4f9db5b5
PB
5715 if (INTEL_INFO(dev)->gen <= 6) {
5716 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5717 return NULL;
5718 }
5719
5720 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5721 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5722 return NULL;
5723 }
5724
5725 downclock_mode = intel_find_panel_downclock
5726 (dev, fixed_mode, connector);
5727
5728 if (!downclock_mode) {
a1d26342 5729 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5730 return NULL;
5731 }
5732
96178eeb 5733 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5734
96178eeb 5735 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5736 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5737 return downclock_mode;
5738}
5739
ed92f0b2 5740static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5741 struct intel_connector *intel_connector)
ed92f0b2
PZ
5742{
5743 struct drm_connector *connector = &intel_connector->base;
5744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5745 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5746 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5747 struct drm_i915_private *dev_priv = dev->dev_private;
5748 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5749 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5750 bool has_dpcd;
5751 struct drm_display_mode *scan;
5752 struct edid *edid;
6517d273 5753 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5754
5755 if (!is_edp(intel_dp))
5756 return true;
5757
49e6bc51
VS
5758 pps_lock(intel_dp);
5759 intel_edp_panel_vdd_sanitize(intel_dp);
5760 pps_unlock(intel_dp);
63635217 5761
ed92f0b2 5762 /* Cache DPCD and EDID for edp. */
ed92f0b2 5763 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5764
5765 if (has_dpcd) {
5766 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5767 dev_priv->no_aux_handshake =
5768 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5769 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5770 } else {
5771 /* if this fails, presume the device is a ghost */
5772 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5773 return false;
5774 }
5775
5776 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5777 pps_lock(intel_dp);
36b5f425 5778 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5779 pps_unlock(intel_dp);
ed92f0b2 5780
060c8778 5781 mutex_lock(&dev->mode_config.mutex);
0b99836f 5782 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5783 if (edid) {
5784 if (drm_add_edid_modes(connector, edid)) {
5785 drm_mode_connector_update_edid_property(connector,
5786 edid);
5787 drm_edid_to_eld(connector, edid);
5788 } else {
5789 kfree(edid);
5790 edid = ERR_PTR(-EINVAL);
5791 }
5792 } else {
5793 edid = ERR_PTR(-ENOENT);
5794 }
5795 intel_connector->edid = edid;
5796
5797 /* prefer fixed mode from EDID if available */
5798 list_for_each_entry(scan, &connector->probed_modes, head) {
5799 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5800 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5801 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5802 intel_connector, fixed_mode);
ed92f0b2
PZ
5803 break;
5804 }
5805 }
5806
5807 /* fallback to VBT if available for eDP */
5808 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5809 fixed_mode = drm_mode_duplicate(dev,
5810 dev_priv->vbt.lfp_lvds_vbt_mode);
5811 if (fixed_mode)
5812 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5813 }
060c8778 5814 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5815
666a4537 5816 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
01527b31
CT
5817 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5818 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5819
5820 /*
5821 * Figure out the current pipe for the initial backlight setup.
5822 * If the current pipe isn't valid, try the PPS pipe, and if that
5823 * fails just assume pipe A.
5824 */
5825 if (IS_CHERRYVIEW(dev))
5826 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5827 else
5828 pipe = PORT_TO_PIPE(intel_dp->DP);
5829
5830 if (pipe != PIPE_A && pipe != PIPE_B)
5831 pipe = intel_dp->pps_pipe;
5832
5833 if (pipe != PIPE_A && pipe != PIPE_B)
5834 pipe = PIPE_A;
5835
5836 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5837 pipe_name(pipe));
01527b31
CT
5838 }
5839
4f9db5b5 5840 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5841 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5842 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5843
5844 return true;
5845}
5846
16c25533 5847bool
f0fec3f2
PZ
5848intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5849 struct intel_connector *intel_connector)
a4fc5ed6 5850{
f0fec3f2
PZ
5851 struct drm_connector *connector = &intel_connector->base;
5852 struct intel_dp *intel_dp = &intel_dig_port->dp;
5853 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5854 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5855 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5856 enum port port = intel_dig_port->port;
a121f4e5 5857 int type, ret;
a4fc5ed6 5858
ccb1a831
VS
5859 if (WARN(intel_dig_port->max_lanes < 1,
5860 "Not enough lanes (%d) for DP on port %c\n",
5861 intel_dig_port->max_lanes, port_name(port)))
5862 return false;
5863
a4a5d2f8
VS
5864 intel_dp->pps_pipe = INVALID_PIPE;
5865
ec5b01dd 5866 /* intel_dp vfuncs */
b6b5e383
DL
5867 if (INTEL_INFO(dev)->gen >= 9)
5868 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
666a4537 5869 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
ec5b01dd
DL
5870 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5871 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5872 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5873 else if (HAS_PCH_SPLIT(dev))
5874 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5875 else
5876 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5877
b9ca5fad
DL
5878 if (INTEL_INFO(dev)->gen >= 9)
5879 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5880 else
5881 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5882
ad64217b
ACO
5883 if (HAS_DDI(dev))
5884 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5885
0767935e
DV
5886 /* Preserve the current hw state. */
5887 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5888 intel_dp->attached_connector = intel_connector;
3d3dc149 5889
3b32a35b 5890 if (intel_dp_is_edp(dev, port))
b329530c 5891 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5892 else
5893 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5894
f7d24902
ID
5895 /*
5896 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5897 * for DP the encoder type can be set by the caller to
5898 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5899 */
5900 if (type == DRM_MODE_CONNECTOR_eDP)
5901 intel_encoder->type = INTEL_OUTPUT_EDP;
5902
c17ed5b5 5903 /* eDP only on port B and/or C on vlv/chv */
666a4537
WB
5904 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5905 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
c17ed5b5
VS
5906 return false;
5907
e7281eab
ID
5908 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5909 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5910 port_name(port));
5911
b329530c 5912 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5913 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5914
a4fc5ed6
KP
5915 connector->interlace_allowed = true;
5916 connector->doublescan_allowed = 0;
5917
f0fec3f2 5918 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5919 edp_panel_vdd_work);
a4fc5ed6 5920
df0e9248 5921 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5922 drm_connector_register(connector);
a4fc5ed6 5923
affa9354 5924 if (HAS_DDI(dev))
bcbc889b
PZ
5925 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5926 else
5927 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5928 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5929
0b99836f 5930 /* Set up the hotplug pin. */
ab9d7c30
PZ
5931 switch (port) {
5932 case PORT_A:
1d843f9d 5933 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5934 break;
5935 case PORT_B:
1d843f9d 5936 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5937 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5938 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5939 break;
5940 case PORT_C:
1d843f9d 5941 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5942 break;
5943 case PORT_D:
1d843f9d 5944 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5945 break;
26951caf
XZ
5946 case PORT_E:
5947 intel_encoder->hpd_pin = HPD_PORT_E;
5948 break;
ab9d7c30 5949 default:
ad1c0b19 5950 BUG();
5eb08b69
ZW
5951 }
5952
dada1a9f 5953 if (is_edp(intel_dp)) {
773538e8 5954 pps_lock(intel_dp);
1e74a324 5955 intel_dp_init_panel_power_timestamps(intel_dp);
666a4537 5956 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
a4a5d2f8 5957 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5958 else
36b5f425 5959 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5960 pps_unlock(intel_dp);
dada1a9f 5961 }
0095e6dc 5962
a121f4e5
VS
5963 ret = intel_dp_aux_init(intel_dp, intel_connector);
5964 if (ret)
5965 goto fail;
c1f05264 5966
0e32b39c 5967 /* init MST on ports that can support it */
0c9b3715
JN
5968 if (HAS_DP_MST(dev) &&
5969 (port == PORT_B || port == PORT_C || port == PORT_D))
5970 intel_dp_mst_encoder_init(intel_dig_port,
5971 intel_connector->base.base.id);
0e32b39c 5972
36b5f425 5973 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5
VS
5974 intel_dp_aux_fini(intel_dp);
5975 intel_dp_mst_encoder_cleanup(intel_dig_port);
5976 goto fail;
b2f246a8 5977 }
32f9d658 5978
f684960e
CW
5979 intel_dp_add_properties(intel_dp, connector);
5980
a4fc5ed6
KP
5981 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5982 * 0xd. Failure to do so will result in spurious interrupts being
5983 * generated on the port when a cable is not attached.
5984 */
5985 if (IS_G4X(dev) && !IS_GM45(dev)) {
5986 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5987 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5988 }
16c25533 5989
aa7471d2
JN
5990 i915_debugfs_connector_add(connector);
5991
16c25533 5992 return true;
a121f4e5
VS
5993
5994fail:
5995 if (is_edp(intel_dp)) {
5996 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5997 /*
5998 * vdd might still be enabled due to the delayed vdd off.
5999 * Make sure vdd is actually turned off here.
6000 */
6001 pps_lock(intel_dp);
6002 edp_panel_vdd_off_sync(intel_dp);
6003 pps_unlock(intel_dp);
6004 }
6005 drm_connector_unregister(connector);
6006 drm_connector_cleanup(connector);
6007
6008 return false;
a4fc5ed6 6009}
f0fec3f2
PZ
6010
6011void
f0f59a00
VS
6012intel_dp_init(struct drm_device *dev,
6013 i915_reg_t output_reg, enum port port)
f0fec3f2 6014{
13cf5504 6015 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6016 struct intel_digital_port *intel_dig_port;
6017 struct intel_encoder *intel_encoder;
6018 struct drm_encoder *encoder;
6019 struct intel_connector *intel_connector;
6020
b14c5679 6021 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6022 if (!intel_dig_port)
6023 return;
6024
08d9bc92 6025 intel_connector = intel_connector_alloc();
11aee0f6
SM
6026 if (!intel_connector)
6027 goto err_connector_alloc;
f0fec3f2
PZ
6028
6029 intel_encoder = &intel_dig_port->base;
6030 encoder = &intel_encoder->base;
6031
893da0c9 6032 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
ade1ba73 6033 DRM_MODE_ENCODER_TMDS, NULL))
893da0c9 6034 goto err_encoder_init;
f0fec3f2 6035
5bfe2ac0 6036 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6037 intel_encoder->disable = intel_disable_dp;
00c09d70 6038 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6039 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6040 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6041 if (IS_CHERRYVIEW(dev)) {
9197c88b 6042 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6043 intel_encoder->pre_enable = chv_pre_enable_dp;
6044 intel_encoder->enable = vlv_enable_dp;
580d3811 6045 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6046 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6047 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6048 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6049 intel_encoder->pre_enable = vlv_pre_enable_dp;
6050 intel_encoder->enable = vlv_enable_dp;
49277c31 6051 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6052 } else {
ecff4f3b
JN
6053 intel_encoder->pre_enable = g4x_pre_enable_dp;
6054 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6055 if (INTEL_INFO(dev)->gen >= 5)
6056 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6057 }
f0fec3f2 6058
174edf1f 6059 intel_dig_port->port = port;
0bdf5a05 6060 dev_priv->dig_port_map[port] = intel_encoder;
f0fec3f2 6061 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 6062 intel_dig_port->max_lanes = 4;
f0fec3f2 6063
00c09d70 6064 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6065 if (IS_CHERRYVIEW(dev)) {
6066 if (port == PORT_D)
6067 intel_encoder->crtc_mask = 1 << 2;
6068 else
6069 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6070 } else {
6071 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6072 }
bc079e8b 6073 intel_encoder->cloneable = 0;
f0fec3f2 6074
13cf5504 6075 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6076 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6077
11aee0f6
SM
6078 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6079 goto err_init_connector;
6080
6081 return;
6082
6083err_init_connector:
6084 drm_encoder_cleanup(encoder);
893da0c9 6085err_encoder_init:
11aee0f6
SM
6086 kfree(intel_connector);
6087err_connector_alloc:
6088 kfree(intel_dig_port);
6089
6090 return;
f0fec3f2 6091}
0e32b39c
DA
6092
6093void intel_dp_mst_suspend(struct drm_device *dev)
6094{
6095 struct drm_i915_private *dev_priv = dev->dev_private;
6096 int i;
6097
6098 /* disable MST */
6099 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6100 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6101 if (!intel_dig_port)
6102 continue;
6103
6104 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6105 if (!intel_dig_port->dp.can_mst)
6106 continue;
6107 if (intel_dig_port->dp.is_mst)
6108 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6109 }
6110 }
6111}
6112
6113void intel_dp_mst_resume(struct drm_device *dev)
6114{
6115 struct drm_i915_private *dev_priv = dev->dev_private;
6116 int i;
6117
6118 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6119 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6120 if (!intel_dig_port)
6121 continue;
6122 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6123 int ret;
6124
6125 if (!intel_dig_port->dp.can_mst)
6126 continue;
6127
6128 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6129 if (ret != 0) {
6130 intel_dp_check_mst_status(&intel_dig_port->dp);
6131 }
6132 }
6133 }
6134}